diff --git "a/val.json" "b/val.json" new file mode 100644--- /dev/null +++ "b/val.json" @@ -0,0 +1,46622 @@ +[ + { + "library": "tensorflow", + "name": "serialize_object_graph_with_registered_savers", + "source_code": "def serialize_object_graph_with_registered_savers(graph_view, saveables_cache):\n return serialize_gathered_objects(graph_view, saveables_cache=saveables_cache)", + "docstring": "Determine checkpoint keys for variables and build a serialized graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util_v1.py", + "ast_data": "FunctionDef name:serialize_object_graph_with_registered_savers arg:graph_view arg:saveables_cache arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "arange", + "source_code": "@tf_export.tf_export('experimental.numpy.arange', v1=[])\n@np_utils.np_doc('arange')\ndef arange(start, stop=None, step=1, dtype=None):\n if not step:\n raise ValueError('step must be non-zero.')\n if dtype:\n dtype = np_utils.result_type(dtype)\n elif stop is None:\n dtype = np_utils.result_type(start, step)\n else:\n dtype = np_utils.result_type(start, step, stop)\n if step > 0 and (stop is not None and start > stop or (stop is None and start < 0)):\n return array([], dtype=dtype)\n if step < 0 and (stop is not None and start < stop or (stop is None and start > 0)):\n return array([], dtype=dtype)\n return math_ops.cast(math_ops.range(start, limit=stop, delta=step), dtype=dtype)", + "docstring": "Returns -separated values in the range [start, stop). Args: start: Start of the interval. Included in the range. stop: End of the interval. If not specified, is treated as 0 and value is used as . If specified, it is not included in the range if is integer. When is floating point, it may or may not be included. step: The difference between 2 consecutive values in the output range. It is recommended to use instead of using non-integer values for . dtype: Optional. Type of the resulting ndarray. Could be a python type, a NumPy type or a TensorFlow . If not provided, the largest type of , , is used. 
Raises: ValueError: If step is zero.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py", + "ast_data": "FunctionDef name:arange arg:start arg:stop arg:step arg:dtype arguments arg arg arg arg If Raise Call If Assign Call If Compare Assign Call Assign Call If BoolOp Compare BoolOp BoolOp Compare Compare BoolOp Compare Compare Return return:yes Call If BoolOp Compare BoolOp BoolOp Compare Compare BoolOp Compare Compare Return return:yes Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "get_sharded_shape", + "source_code": "def get_sharded_shape(self, shape, shard_index=None):\n if self._shard_dimension is None or self._number_of_shards is None:\n return None\n if shard_index is not None:\n if shard_index < 0 or shard_index >= self.number_of_shards:\n raise ValueError(f'Requested shard_index {shard_index}, but shard_index must be in [0,{self._number_of_shards}).')\n shape = tensor_shape.as_shape(shape)\n if self._number_of_shards == 1:\n return shape\n ndims = shape.ndims\n if ndims is None:\n raise ValueError(f'Shape {shape} must be a known shape.')\n if ndims <= self._shard_dimension:\n raise ValueError(f'Shape {shape.as_list()} does not contain shard_dimension {self._shard_dimension}')\n dims = shape.as_list()\n if dims[self._shard_dimension] is None:\n raise ValueError(f'Shape {shape.as_list()} must have a fixed size for dimension {self._shard_dimension} that is known at construction time.')\n if dims[self._shard_dimension] % self._number_of_shards != 0:\n raise ValueError(f'Shape {shape.as_list()} cannot be sharded {self._number_of_shards} ways along dimension {self._shard_dimension}')\n dims[self._shard_dimension] //= self._number_of_shards\n return tensor_shape.TensorShape(dims)", + "docstring": "Returns the shape of a shard of a full Tensor. When given the shape of a 'full-size' Tensor, returns the shape of the sub-Tensor after it has been sharded. Freezes the policy if it has not yet been frozen. Args: shape: The shape of the full-size Tensor to be sharded. shard_index: The index of the shard whose shape should be returned. shard_index can be None for sharding policies that use the same shape for every shard. Returns: The shape of the sharded version of the Tensor. Raises: ValueError: If shard_index is None when shards are of different shapes; or shard_index is not None and !(0<=shard_index None:\n visitor = HighlightLanguageVisitor(self.document, self.config.highlight_language)\n self.document.walkabout(visitor)\n for node in list(self.document.findall(addnodes.highlightlang)):\n node.parent.remove(node)", + "docstring": "Apply highlight_language to all literal_block nodes. This refers both :confval: setting and :rst:dir: directive. 
After processing, this transform removes `` node from doctree.", + "type": "class", + "file_path": "sphinx\\sphinx\\transforms\\post_transforms\\code.py", + "ast_data": "ClassDef name:HighlightLanguageTransform Assign FunctionDef name:apply arg:self arguments arg arg Assign Call Call For Call Call Call" + }, + { + "library": "pytorch", + "name": "write_main", + "source_code": "def write_main(self, install_root, oss, symbol_name):\n with open(os.path.join(install_root, 'main.c'), 'w') as outfp:\n outfp.write(MAIN_INCLUDES)\n for m in self.frozen_modules:\n outfp.write(f'extern unsigned char {m.c_name}[];\\n')\n outfp.write(MAIN_PREFIX_TEMPLATE.format(symbol_name))\n for m in self.frozen_modules:\n outfp.write(f'\\t{{\"{m.module_name}\", {m.c_name}, {m.size}}},\\n')\n outfp.write(MAIN_SUFFIX)\n if oss:\n outfp.write(FAKE_PREFIX)\n outfp.write(MAIN_SUFFIX)", + "docstring": "Write the file containing a table enumerating all the frozen modules.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\_freeze.py", + "ast_data": "FunctionDef name:write_main arg:self arg:install_root arg:oss arg:symbol_name arguments arg arg arg arg With Call Call Call For Call Call Call For Call Call If Call Call" + }, + { + "library": "pandas", + "name": "getitem_block_columns", + "source_code": "@final\ndef getitem_block_columns(self, slicer: slice, new_mgr_locs: BlockPlacement, ref_inplace_op: bool=False) -> Self:\n new_values = self._slice(slicer)\n refs = self.refs if not ref_inplace_op or self.refs.has_reference() else None\n return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs)", + "docstring": "Perform __getitem__-like, return result as block. Only supports slices that preserve dimensionality.", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "FunctionDef name:getitem_block_columns arg:self arg:slicer arg:new_mgr_locs arg:ref_inplace_op arguments arg arg arg arg Assign Call Assign BoolOp Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_rel_timestamps", + "source_code": "def get_rel_timestamps(self, node_name, output_slot, debug_op, device_name=None):\n device_name = self._infer_device_name(device_name, node_name)\n watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)\n if watch_key not in self._watch_key_to_datum[device_name]:\n raise WatchKeyDoesNotExistInDebugDumpDirError('Watch key \"%s\" does not exist in the debug dump' % watch_key)\n return self._watch_key_to_rel_time[device_name][watch_key]", + "docstring": "Get the relative timestamp from for a debug-dumped tensor. Relative timestamp means (absolute timestamp - ), where is the absolute timestamp of the first dumped tensor in the dump root. The tensor may be dumped multiple times in the dump root directory, so a list of relative timestamps () is returned. Args: node_name: () name of the node that the tensor is produced by. output_slot: () output slot index of tensor. debug_op: () name of the debug op. device_name: () name of the device. If there is only one device or if the specified debug_watch_key exists on only one device, this argument is optional. Returns: ( of ) list of relative timestamps. 
Raises: WatchKeyDoesNotExistInDebugDumpDirError: If the tensor watch key does not exist in the debug dump data.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:get_rel_timestamps arg:self arg:node_name arg:output_slot arg:debug_op arg:device_name arguments arg arg arg arg arg Assign Call Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "django", + "name": "check_rel_lookup_compatibility", + "source_code": "def check_rel_lookup_compatibility(model, target_opts, field):\n\n def check(opts):\n return model._meta.concrete_model == opts.concrete_model or opts.concrete_model in model._meta.all_parents or model in opts.all_parents\n return check(target_opts) or (getattr(field, 'primary_key', False) and check(field.model._meta))", + "docstring": "Check that self.model is compatible with target_opts. Compatibility is OK if: 1) model and opts match (where proxy inheritance is removed) 2) model is parent of opts' model or the other way around", + "type": "function", + "file_path": "django\\django\\db\\models\\query_utils.py", + "ast_data": "FunctionDef name:check_rel_lookup_compatibility arg:model arg:target_opts arg:field arguments arg arg arg FunctionDef name:check arg:opts arguments arg Return return:yes BoolOp Compare Compare Compare Return return:yes BoolOp Call BoolOp Call Call" + }, + { + "library": "scikit-learn", + "name": "_get_ordered_idx", + "source_code": "def _get_ordered_idx(self, mask_missing_values):\n frac_of_missing_values = mask_missing_values.mean(axis=0)\n if self.skip_complete:\n missing_values_idx = np.flatnonzero(frac_of_missing_values)\n else:\n missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0])\n if self.imputation_order == 'roman':\n ordered_idx = missing_values_idx\n elif self.imputation_order == 'arabic':\n ordered_idx = missing_values_idx[::-1]\n elif self.imputation_order == 'ascending':\n n = len(frac_of_missing_values) - len(missing_values_idx)\n ordered_idx = np.argsort(frac_of_missing_values, kind='mergesort')[n:]\n elif self.imputation_order == 'descending':\n n = len(frac_of_missing_values) - len(missing_values_idx)\n ordered_idx = np.argsort(frac_of_missing_values, kind='mergesort')[n:][::-1]\n elif self.imputation_order == 'random':\n ordered_idx = missing_values_idx\n self.random_state_.shuffle(ordered_idx)\n return ordered_idx", + "docstring": "Decide in what order we will update the features. As a homage to the MICE R package, we will have 4 main options of how to order the updates, and use a random order if anything else is specified. Also, this function skips features which have no missing values. Parameters ---------- mask_missing_values : array-like, shape (n_samples, n_features) Input data's missing indicator matrix, where is the number of samples and is the number of features. 
Returns ------- ordered_idx : ndarray, shape (n_features,) The order in which to impute the features.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\impute\\_iterative.py", + "ast_data": "FunctionDef name:_get_ordered_idx arg:self arg:mask_missing_values arguments arg arg Assign Call If Assign Call Assign Call Call If Compare Assign If Compare Assign If Compare Assign Call Call Assign Call If Compare Assign Call Call Assign Call If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "op_is_inside_loop", + "source_code": "def op_is_inside_loop(self, op):\n assert isinstance(op, ops.Operation)\n return op._id in self._pfor_op_ids", + "docstring": "True if op was created inside the pfor loop body.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:op_is_inside_loop arg:self arg:op arguments arg arg Call Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "to_code", + "source_code": "@tf_export('autograph.to_code', v1=[])\ndef to_code(entity, recursive=True, experimental_optional_features=None):\n source = tf_inspect.getsource(to_graph(entity, recursive=recursive, experimental_optional_features=experimental_optional_features))\n return textwrap.dedent(source)", + "docstring": "Returns the source code generated by AutoGraph, as a string. Example usage: >>> def f(x): ... if x >> tf.autograph.to_code(f) \"...def tf__f(x):...\" Also see: . Note: If a function has been decorated with , pass its underlying Python function, rather than the callable that Nonetf.autograph.experimental.Feature` value. Returns: The converted code as string.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py", + "ast_data": "FunctionDef name:to_code arg:entity arg:recursive arg:experimental_optional_features arguments arg arg arg Assign Call Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "aslinearoperator", + "source_code": "def aslinearoperator(A):\n if isinstance(A, LinearOperator):\n return A\n elif isinstance(A, np.ndarray) or isinstance(A, np.matrix):\n if A.ndim > 2:\n raise ValueError('array must have ndim <= 2')\n A = np.atleast_2d(np.asarray(A))\n return MatrixLinearOperator(A)\n elif issparse(A) or is_pydata_spmatrix(A):\n return MatrixLinearOperator(A)\n elif hasattr(A, 'shape') and hasattr(A, 'matvec'):\n rmatvec = None\n rmatmat = None\n dtype = None\n if hasattr(A, 'rmatvec'):\n rmatvec = A.rmatvec\n if hasattr(A, 'rmatmat'):\n rmatmat = A.rmatmat\n if hasattr(A, 'dtype'):\n dtype = A.dtype\n return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec, rmatmat=rmatmat, dtype=dtype)\n else:\n raise TypeError('type not understood')", + "docstring": "Return A as a LinearOperator. 'A' may be any of the following types: - ndarray - matrix - sparse array (e.g. csr_array, lil_array, etc.) - LinearOperator - An object with .shape and .matvec attributes See the LinearOperator documentation for additional information. Notes ----- If 'A' has no .dtype attribute, the data type is determined by calling :func: - set the .dtype attribute to prevent this call upon the linear operator creation. 
Examples -------- >>> import numpy as np >>> from scipy.sparse.linalg import aslinearoperator >>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32) >>> aslinearoperator(M)", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py", + "ast_data": "FunctionDef name:aslinearoperator arg:A arguments arg If Call Return return:yes If BoolOp Call Call If Compare Raise Call Assign Call Call Return return:yes Call If BoolOp Call Call Return return:yes Call If BoolOp Call Call Assign Assign Assign If Call Assign If Call Assign If Call Assign Return return:yes Call Raise Call" + }, + { + "library": "pytorch", + "name": "_fx_collection_equivalence_fn", + "source_code": "def _fx_collection_equivalence_fn(spec1_type: Optional[type], spec1_context: pytree.Context, spec2_type: Optional[type], spec2_context: pytree.Context) -> bool:\n if spec1_type is None or spec2_type is None:\n return spec1_type is spec2_type and spec1_context == spec2_context\n if issubclass(spec1_type, (dict, immutable_dict)) and issubclass(spec2_type, (dict, immutable_dict)):\n return spec1_context == spec2_context\n if issubclass(spec1_type, (list, immutable_list)) and issubclass(spec2_type, (list, immutable_list)):\n return spec1_context == spec2_context\n return spec1_type is spec2_type and spec1_context == spec2_context", + "docstring": "Treat containers and their immutable variants as the same type. Otherwise compare as normal.", + "type": "function", + "file_path": "pytorch\\torch\\export\\exported_program.py", + "ast_data": "FunctionDef name:_fx_collection_equivalence_fn arg:spec1_type arg:spec1_context arg:spec2_type arg:spec2_context arguments arg arg arg arg If BoolOp Compare Compare Return return:yes BoolOp Compare Compare If BoolOp Call Call Return return:yes Compare If BoolOp Call Call Return return:yes Compare Return return:yes BoolOp Compare Compare" + }, + { + "library": "tensorflow", + "name": "creator_with_resource_vars", + "source_code": "def creator_with_resource_vars(next_creator, **kwargs):\n if ops.inside_function():\n if_graph_building = 'graph_building'\n else:\n if_graph_building = 'not_graph_building'\n with monitoring.MonitoredTimer(distributed_variable_creation_time_counter.get_cell(strategy.__class__.__name__, if_graph_building)):\n _require_strategy_scope_extended(self)\n kwargs['use_resource'] = True\n kwargs['distribute_strategy'] = strategy\n if isinstance(kwargs['initial_value'], trackable.CheckpointInitialValue):\n checkpoint_restore_uid = kwargs['initial_value'].checkpoint_position.restore_uid\n kwargs['initial_value'] = kwargs['initial_value'].wrapped_value\n elif isinstance(kwargs['initial_value'], trackable.CheckpointInitialValueCallable):\n checkpoint_restore_uid = kwargs['initial_value'].checkpoint_position.restore_uid\n elif isinstance(kwargs['initial_value'], functools.partial) and isinstance(kwargs['initial_value'].func, trackable.CheckpointInitialValueCallable):\n checkpoint_restore_uid = kwargs['initial_value'].func.checkpoint_position.restore_uid\n else:\n checkpoint_restore_uid = None\n created = self._create_variable(next_creator, **kwargs)\n if checkpoint_restore_uid is not None:\n created._maybe_initialize_trackable()\n created._update_uid = checkpoint_restore_uid\n return created", + "docstring": "Variable creator to use in .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:creator_with_resource_vars arg:next_creator arguments arg arg If Call Assign Assign With Call Call 
Call Assign Assign If Call Assign Assign If Call Assign If BoolOp Call Call Assign Assign Assign Call If Compare Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, y_true, y_pred, sample_weight=None):\n graph_ctx = tf_utils.graph_context_for_symbolic_tensors(y_true, y_pred, sample_weight)\n with backend.name_scope(self._name_scope), graph_ctx:\n if context.executing_eagerly():\n call_fn = self.call\n else:\n call_fn = autograph.tf_convert(self.call, ag_ctx.control_status_ctx())\n losses = call_fn(y_true, y_pred)\n return losses_utils.compute_weighted_loss(losses, sample_weight, reduction=self._get_reduction())", + "docstring": "Invokes the instance. Args: y_true: Ground truth values. shape = , except sparse loss functions such as sparse categorical crossentropy where shape = y_pred: The predicted values. shape = sample_weight: Optional acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If is a tensor of size , then the total loss for each sample of the batch is rescaled by the corresponding element in the vector. If the shape of is (or can be broadcasted to this shape), then each loss element of is scaled by the corresponding value of . (Note on: all loss functions reduce by 1 dimension, usually axis=-1.) Returns: Weighted loss float . If is , this has shape ; otherwise, it is scalar. (Note because all loss functions reduce by 1 dimension, usually axis=-1.) Raises: ValueError: If the shape of is invalid.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Assign Call With Call If Call Assign Assign Call Call Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "entropy", + "source_code": "def entropy(self, n, p):\n n, p, npcond = self._process_parameters(n, p)\n x = np.r_[1:np.max(n) + 1]\n term1 = n * np.sum(entr(p), axis=-1)\n term1 -= gammaln(n + 1)\n n = n[..., np.newaxis]\n new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1\n x.shape += (1,) * new_axes_needed\n term2 = np.sum(binom.pmf(x, n, p) * gammaln(x + 1), axis=(-1, -1 - new_axes_needed))\n return self._checkresult(term1 + term2, npcond, np.nan)", + "docstring": "Compute the entropy of the multinomial distribution. The entropy is computed using this expression: .. math:: f(x) = - \\log n! - n\\sum_{i=1}^k p_i \\log p_i + \\sum_{i=1}^k \\sum_{x=0}^n \\binom n x p_i^x(1-p_i)^{n-x} \\log x! Parameters ---------- %(_doc_default_callparams)s Returns ------- h : scalar Entropy of the Multinomial distribution Notes ----- %(_doc_callparams_note)s", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:entropy arg:self arg:n arg:p arguments arg arg arg Assign Call Assign Call Assign Call Call Call Assign Assign Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "adjoint", + "source_code": "def adjoint(self) -> Tensor:\n batch_size = len(self.z) if len(self.z.shape) > 0 else None\n return self.identity(batch_size, self.z.device, self.z.real.dtype).matrix()", + "docstring": "Return the adjoint matrix of shape :math:. 
Example: >>> s = So2.identity() >>> s.adjoint() tensor([[1., -0.], [0., 1.]], grad_fn=)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py", + "ast_data": "FunctionDef name:adjoint arg:self arguments arg Assign Compare Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "lazy_deprecated_import", + "source_code": "def lazy_deprecated_import(all: list[str], old_module: str, new_module: str) -> Callable:\n warning_message = _MESSAGE_TEMPLATE.format(old_location=old_module, new_location=new_module)\n\n def getattr_dunder(name: str) -> None:\n if name in all:\n warnings.warn(warning_message, RuntimeWarning)\n package = importlib.import_module(new_module)\n return getattr(package, name)\n raise AttributeError(f'Module {new_module!r} has no attribute {name!r}.')\n return getattr_dunder", + "docstring": "Import utility to lazily import deprecated packages / modules / functional. The old_module and new_module are also used in the deprecation warning defined by the . Args: all: The list of the functions that are imported. Generally, the module's __all__ list of the module. old_module: Old module location new_module: New module location / Migrated location Returns: Callable to assign to the Usage: # In the from torch.nn.utils._deprecation_utils import lazy_deprecated_import _MIGRATED_TO = \"torch.ao.nn.quantized.functional\" __getattr__ = lazy_deprecated_import( all=__all__, old_module=__name__, new_module=_MIGRATED_TO)", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\_deprecation_utils.py", + "ast_data": "FunctionDef name:lazy_deprecated_import arg:all arg:old_module arg:new_module arguments arg arg arg Assign Call FunctionDef name:getattr_dunder arg:name arguments arg If Compare Call Assign Call Return return:yes Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "variable_capturing_scope", + "source_code": "def variable_capturing_scope(next_creator, **kwds):\n enable_variable_lifting = kwds.get('experimental_enable_variable_lifting')\n if enable_variable_lifting is None:\n enable_variable_lifting = True\n if not enable_variable_lifting:\n return next_creator(**kwds)\n v = UnliftedInitializerVariable(add_initializers_to=add_initializers_to, **kwds)\n created_variables.append(weakref.ref(v))\n return v", + "docstring": "Creates UnliftedInitializerVariables and saves references to them.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py", + "ast_data": "FunctionDef name:variable_capturing_scope arg:next_creator arguments arg arg Assign Call If Compare Assign If Return return:yes Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_init_from_metadata", + "source_code": "@classmethod\ndef _init_from_metadata(cls, metadata):\n revived_obj = cls(name=metadata['name'])\n with utils.no_automatic_dependency_tracking_scope(revived_obj):\n revived_obj._expects_training_arg = metadata['expects_training_arg']\n config = metadata.get('config')\n if generic_utils.validate_config(config):\n revived_obj._config = config\n if metadata.get('activity_regularizer') is not None:\n revived_obj.activity_regularizer = regularizers.deserialize(metadata['activity_regularizer'])\n return (revived_obj, _revive_setter)", + "docstring": "Create revived network from metadata stored in the SavedModel proto.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py", 
+ "ast_data": "FunctionDef name:_init_from_metadata arg:cls arg:metadata arguments arg arg Assign Call With Call Assign Assign Call If Call Assign If Compare Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "cross_product_matrix", + "source_code": "def cross_product_matrix(x: torch.Tensor) -> torch.Tensor:\n if not x.shape[-1] == 3:\n raise AssertionError(x.shape)\n x0 = x[..., 0]\n x1 = x[..., 1]\n x2 = x[..., 2]\n zeros = zeros_like(x0)\n cross_product_matrix_flat = stack([zeros, -x2, x1, x2, zeros, -x0, -x1, x0, zeros], dim=-1)\n shape_ = x.shape[:-1] + (3, 3)\n return cross_product_matrix_flat.view(*shape_)", + "docstring": "Return the cross_product_matrix symmetric matrix of a vector. Args: x: The input vector to construct the matrix in the shape :math:. Returns: The constructed cross_product_matrix symmetric matrix with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\epipolar\\numeric.py", + "ast_data": "FunctionDef name:cross_product_matrix arg:x arguments arg If Compare Raise Call Assign Assign Assign Assign Call Assign Call Assign Return return:yes Call" + }, + { + "library": "django", + "name": "get_urls", + "source_code": "def get_urls(self, page=1, site=None, protocol=None):\n urls = Sitemap.get_urls(self, page=page, site=site, protocol=protocol)\n for url in urls:\n url['geo_format'] = self.geo_format\n return urls", + "docstring": "This method is overridden so the appropriate attribute is placed on each URL element.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\sitemaps\\kml.py", + "ast_data": "FunctionDef name:get_urls arg:self arg:page arg:site arg:protocol arguments arg arg arg arg Assign Call For Assign Return return:yes" + }, + { + "library": "django", + "name": "_simple_domain_name_validator", + "source_code": "def _simple_domain_name_validator(value):\n checks = (s in value for s in string.whitespace)\n if any(checks):\n raise ValidationError(_('The domain name cannot contain any spaces or tabs.'), code='invalid')", + "docstring": "Validate that the given value contains no whitespaces to prevent common typos.", + "type": "function", + "file_path": "django\\django\\contrib\\sites\\models.py", + "ast_data": "FunctionDef name:_simple_domain_name_validator arg:value arguments arg Assign Compare If Call Raise Call Call" + }, + { + "library": "matplotlib", + "name": "post_gist", + "source_code": "def post_gist(content, description='', filename='file', auth=False):\n post_data = json.dumps({'description': description, 'public': True, 'files': {filename: {'content': content}}}).encode('utf-8')\n headers = make_auth_header() if auth else {}\n response = requests.post('https://api.github.com/gists', data=post_data, headers=headers)\n response.raise_for_status()\n response_data = json.loads(response.text)\n return response_data['html_url']", + "docstring": "Post some text to a Gist, and return the URL.", + "type": "function", + "file_path": "matplotlib\\tools\\gh_api.py", + "ast_data": "FunctionDef name:post_gist arg:content arg:description arg:filename arg:auth arguments arg arg arg arg Assign Call Call Assign Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "unnecessary_dtype_convert", + "source_code": "@register_graph_pattern(CallFunction(torch.ops.prims.convert_element_type.default, Ignored(), KeywordArg('dtype')), pass_dict=pass_patterns[0], extra_check=same_dtype)\ndef unnecessary_dtype_convert(match: Match, **kwargs):\n graph = match.graph\n 
node = match.output_node()\n node.replace_all_uses_with(node.args[0])\n graph.erase_node(node)", + "docstring": "Remove unnecessary dtype conversion op, probably left as a result of Conv-Bn folding", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\freezing_patterns.py", + "ast_data": "FunctionDef name:unnecessary_dtype_convert arg:match arguments arg arg Assign Assign Call Call Call Call Call Call Call" + }, + { + "library": "numpy", + "name": "__getstate__", + "source_code": "def __getstate__(self):\n state = (1, self.shape, self.dtype, self.flags.fnc, self._data.tobytes(), self._mask.tobytes(), self._fill_value)\n return state", + "docstring": "Return the internal state of the masked array. This is for pickling.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\mrecords.py", + "ast_data": "FunctionDef name:__getstate__ arg:self arguments arg Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_should_fallback_to_positional", + "source_code": "@cache_readonly\ndef _should_fallback_to_positional(self) -> bool:\n return self.inferred_type not in {'integer', 'mixed-integer', 'floating', 'complex'}", + "docstring": "Should an integer key be treated as positional?", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_should_fallback_to_positional arg:self arguments arg Return return:yes Compare" + }, + { + "library": "django", + "name": "dims", + "source_code": "@property\ndef dims(self):\n return capi.get_dims(self.ptr)", + "docstring": "Return the dimension of this Geometry (0=point, 1=line, 2=surface).", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:dims arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_yaxis_text1_transform", + "source_code": "def get_yaxis_text1_transform(self, pad_points):\n labels_align = mpl.rcParams['ytick.alignment']\n return (self.get_yaxis_transform(which='tick1') + mtransforms.ScaledTranslation(-1 * pad_points / 72, 0, self.get_figure(root=False).dpi_scale_trans), labels_align, 'right')", + "docstring": "Returns ------- transform : Transform The transform used for drawing y-axis labels, which will add *pad_points* of padding (in points) between the axis and the label. The x-direction is in axis coordinates and the y-direction is in data coordinates valign : {'center', 'top', 'bottom', 'baseline', 'center_baseline'} The text vertical alignment. halign : {'center', 'left', 'right'} The text horizontal alignment. Notes ----- This transformation is primarily used by the class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:get_yaxis_text1_transform arg:self arg:pad_points arguments arg arg Assign Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "pts_to_prestep", + "source_code": "def pts_to_prestep(x, *args):\n steps = np.zeros((1 + len(args), max(2 * len(x) - 1, 0)))\n steps[0, 0::2] = x\n steps[0, 1::2] = steps[0, 0:-2:2]\n steps[1:, 0::2] = args\n steps[1:, 1::2] = steps[1:, 2::2]\n return steps", + "docstring": "Convert continuous line to pre-steps. Given a set of ``, the length will be 0. 
Examples -------- >>> x_s, y1_s, y2_s = pts_to_prestep(x, y1, y2)", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:pts_to_prestep arg:x arguments arg arg Assign Call Call Call Call Assign Assign Assign Assign Return return:yes" + }, + { + "library": "scipy", + "name": "asterisk_repl", + "source_code": "def asterisk_repl(matchobj):\n code = matchobj.group(1).replace('\\\\*', '*')\n return '``' + code + '``'", + "docstring": "repl to un-escape asterisks in code blocks", + "type": "function", + "file_path": "scipy\\tools\\gh_lists.py", + "ast_data": "FunctionDef name:asterisk_repl arg:matchobj arguments arg Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "num_coords", + "source_code": "@property\ndef num_coords(self):\n return capi.get_num_coords(self.ptr)", + "docstring": "Return the number of coordinates in the Geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:num_coords arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "truncate", + "source_code": "def truncate(self, before=None, after=None) -> MultiIndex:\n if after and before and (after < before):\n raise ValueError('after < before')\n i, j = self.levels[0].slice_locs(before, after)\n left, right = self.slice_locs(before, after)\n new_levels = list(self.levels)\n new_levels[0] = new_levels[0][i:j]\n new_codes = [level_codes[left:right] for level_codes in self.codes]\n new_codes[0] = new_codes[0] - i\n return MultiIndex(levels=new_levels, codes=new_codes, names=self._names, verify_integrity=False)", + "docstring": "Slice index between two labels / tuples, return new MultiIndex. Parameters ---------- before : label or tuple, can be partial. Default None None defaults to start. after : label or tuple, can be partial. Default None None defaults to end. Returns ------- MultiIndex The truncated MultiIndex. See Also -------- DataFrame.truncate : Truncate a DataFrame before and after some index values. Series.truncate : Truncate a Series before and after some index values. 
Examples -------- >>> mi = pd.MultiIndex.from_arrays([[\"a\", \"b\", \"c\"], [\"x\", \"y\", \"z\"]]) >>> mi MultiIndex([('a', 'x'), ('b', 'y'), ('c', 'z')], ) >>> mi.truncate(before=\"a\", after=\"b\") MultiIndex([('a', 'x'), ('b', 'y')], )", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:truncate arg:self arg:before arg:after arguments arg arg arg If BoolOp Compare Raise Call Assign Call Assign Call Assign Call Assign Assign Assign Return return:yes Call" + }, + { + "library": "django", + "name": "get_changelist", + "source_code": "def get_changelist(self, request, **kwargs):\n from django.contrib.admin.views.main import ChangeList\n return ChangeList", + "docstring": "Return the ChangeList class for use on the changelist page.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:get_changelist arg:self arg:request arguments arg arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_size", + "source_code": "def _size(t, dtype=None):\n size = t.get_shape().num_elements() if isinstance(t, tensor_lib.Tensor) else None\n return array_ops.size(t, out_type=dtype) if size is None else size", + "docstring": "Returns size as an integer (when statically known) or as a tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:_size arg:t arg:dtype arguments arg arg Assign Call Call Call Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "_validate_signature_def_map", + "source_code": "def _validate_signature_def_map(self, signature_def_map):\n for signature_def_key in signature_def_map:\n signature_def = signature_def_map[signature_def_key]\n inputs = signature_def.inputs\n outputs = signature_def.outputs\n for inputs_key in inputs:\n self._validate_tensor_info(inputs[inputs_key])\n for outputs_key in outputs:\n self._validate_tensor_info(outputs[outputs_key])\n if constants.INIT_OP_SIGNATURE_KEY in signature_def_map:\n raise KeyError(f'SignatureDef map key \"{constants.INIT_OP_SIGNATURE_KEY}\" is reserved for initialization. Please use a different key.')\n if constants.TRAIN_OP_SIGNATURE_KEY in signature_def_map:\n raise KeyError(f'SignatureDef map key \"{constants.TRAIN_OP_SIGNATURE_KEY}\" is reserved for the train op. Please use a different key.')", + "docstring": "Validates the entries in the signature def map. Validation of entries in the signature def map includes ensuring that the and fields of the TensorInfo protos of the and of each are populated. Also ensures that reserved SignatureDef keys for the initialization and train ops are not used. Args: signature_def_map: The map of signature defs to be validated. Raises: AssertionError: If a TensorInfo is not valid. KeyError: If a reserved signature key is used in the map.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py", + "ast_data": "FunctionDef name:_validate_signature_def_map arg:self arg:signature_def_map arguments arg arg For Assign Assign Assign For Call For Call If Compare Raise Call If Compare Raise Call" + }, + { + "library": "pandas", + "name": "_from_factorized", + "source_code": "@classmethod\ndef _from_factorized(cls, values, original):\n raise AbstractMethodError(cls)", + "docstring": "Reconstruct an ExtensionArray after factorization. Parameters ---------- values : ndarray An integer ndarray with the factorized values. 
original : ExtensionArray The original ExtensionArray that factorize was called on. See Also -------- factorize : Top-level factorize method that dispatches here. ExtensionArray.factorize : Encode the extension array as an enumerated type. Examples -------- >>> interv_arr = pd.arrays.IntervalArray( ... [pd.Interval(0, 1), pd.Interval(1, 5), pd.Interval(1, 5)] ... ) >>> codes, uniques = pd.factorize(interv_arr) >>> pd.arrays.IntervalArray._from_factorized(uniques, interv_arr) [(0, 1], (1, 5]] Length: 2, dtype: interval[int64, right]", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:_from_factorized arg:cls arg:values arg:original arguments arg arg arg Raise Call" + }, + { + "library": "pandas", + "name": "select", + "source_code": "def select(self):\n if self.condition is not None:\n return self.table.table.read_where(self.condition.format(), start=self.start, stop=self.stop)\n elif self.coordinates is not None:\n return self.table.table.read_coordinates(self.coordinates)\n return self.table.table.read(start=self.start, stop=self.stop)", + "docstring": "generate the selection", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:select arg:self arguments arg If Compare Return return:yes Call Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "authlib", + "name": "add_to_body", + "source_code": "def add_to_body(token, body=None):\n if body is None:\n body = ''\n return add_params_to_qs(body, [('access_token', token)])", + "docstring": "Add a Bearer Token to the request body. access_token=h480djs93hd8", + "type": "function", + "file_path": "authlib\\authlib\\oauth2\\rfc6750\\parameters.py", + "ast_data": "FunctionDef name:add_to_body arg:token arg:body arguments arg arg If Compare Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "_getcol", + "source_code": "def _getcol(self, i):\n M, N = self.shape\n i = int(i)\n if i < 0:\n i += N\n if i < 0 or i >= N:\n raise IndexError(f'index ({i}) out of range')\n return self._get_submatrix(major=i, copy=True)", + "docstring": "Returns a copy of column i of the matrix, as a (m x 1) CSC matrix (column vector).", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_csc.py", + "ast_data": "FunctionDef name:_getcol arg:self arg:i arguments arg arg Assign Assign Call If Compare If BoolOp Compare Compare Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "reduce_weighted_loss", + "source_code": "def reduce_weighted_loss(weighted_losses, reduction=ReductionV2.SUM_OVER_BATCH_SIZE):\n if reduction == ReductionV2.NONE:\n loss = weighted_losses\n else:\n loss = math_ops.reduce_sum(weighted_losses)\n if reduction == ReductionV2.SUM_OVER_BATCH_SIZE:\n loss = _safe_mean(loss, _num_elements(weighted_losses))\n return loss", + "docstring": "Reduces the individual weighted loss measurements.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\losses_utils.py", + "ast_data": "FunctionDef name:reduce_weighted_loss arg:weighted_losses arg:reduction arguments arg arg If Compare Assign Assign Call If Compare Assign Call Call Return return:yes" + }, + { + "library": "scrapy", + "name": "text", + "source_code": "@property\ndef text(self) -> str:\n raise AttributeError(\"Response content isn't text\")", + "docstring": "For subclasses of TextResponse, this will return the body as str", + "type": "method", + "file_path": 
"scrapy\\scrapy\\http\\response\\__init__.py", + "ast_data": "FunctionDef name:text arg:self arguments arg Raise Call" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, img: Tensor, lafs: Tensor) -> Tensor:\n return get_laf_descriptors(img, lafs, self.descriptor, self.patch_size, self.grayscale_descriptor)", + "docstring": "Three stage local feature detection. First the location and scale of interest points are determined by detect function. Then affine shape and orientation. Args: img: image features with shape :math:. lafs: local affine frames :math:. Returns: Local descriptors of shape :math: where :math: is descriptor size.", + "type": "method", + "file_path": "kornia\\kornia\\feature\\integrated.py", + "ast_data": "FunctionDef name:forward arg:self arg:img arg:lafs arguments arg arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "theta", + "source_code": "@property\ndef theta(self):\n return np.hstack([kernel.theta for kernel in self.kernels])", + "docstring": "Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel's hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns ------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:theta arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "run_with_hooks", + "source_code": "def run_with_hooks(self, *args, **kwargs):\n return self._run_with_hooks_fn(*args, **kwargs)", + "docstring": "Same as . Accepts the same arguments.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", + "ast_data": "FunctionDef name:run_with_hooks arg:self arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "sample", + "source_code": "def sample(self, sample_shape=torch.Size()):\n with torch.no_grad():\n x = self.base_dist.sample(sample_shape)\n for transform in self.transforms:\n x = transform(x)\n return x", + "docstring": "Generates a sample_shape shaped sample or sample_shape shaped batch of samples if the distribution parameters are batched. Samples first from base distribution and applies for every transform in the list.", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\transformed_distribution.py", + "ast_data": "FunctionDef name:sample arg:self arg:sample_shape arguments arg arg Call With Call Assign Call For Assign Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "SvcStop", + "source_code": "def SvcStop(self):\n from cherrypy import process\n self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\n process.bus.exit()", + "docstring": "Stop the service.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\win32.py", + "ast_data": "FunctionDef name:SvcStop arg:self arguments arg Call Call" + }, + { + "library": "pytorch", + "name": "append_step", + "source_code": "def append_step(self, step: OutputAdaptStep) -> None:\n self._steps.append(step)", + "docstring": "Appends a step to the output format steps. 
Args: step: The step to append.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", + "ast_data": "FunctionDef name:append_step arg:self arg:step arguments arg arg Call" + }, + { + "library": "tensorflow", + "name": "transform_feature", + "source_code": "def transform_feature(self, transformation_cache, state_manager):\n input_tensor = transformation_cache.get(self.key, state_manager)\n return self._transform_input_tensor(input_tensor)", + "docstring": "See base class. In this case, we apply the to the input tensor. Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables. Returns: Normalized input tensor. Raises: ValueError: If a SparseTensor is passed in.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "compute_output_signature", + "source_code": "@doc_controls.for_subclass_implementers\ndef compute_output_signature(self, input_signature):\n\n def check_type_return_shape(s):\n if not isinstance(s, tensor.TensorSpec):\n raise TypeError('Only TensorSpec signature types are supported, but saw signature entry: {}.'.format(s))\n return s.shape\n input_shape = nest.map_structure(check_type_return_shape, input_signature)\n output_shape = self.compute_output_shape(input_shape)\n dtype = self._compute_dtype\n if dtype is None:\n input_dtypes = [s.dtype for s in nest.flatten(input_signature)]\n dtype = input_dtypes[0]\n return nest.map_structure(lambda s: tensor.TensorSpec(dtype=dtype, shape=s), output_shape)", + "docstring": "Compute the output tensor signature of the layer based on the inputs. Unlike a TensorShape object, a TensorSpec object contains both shape and dtype information for a tensor. This method allows layers to provide output dtype information if it is different from the input dtype. For any layer that doesn't implement this function, the framework will fall back to use , and will assume that the output dtype matches the input dtype. Args: input_signature: Single TensorSpec or nested structure of TensorSpec objects, describing a candidate input for the layer. Returns: Single TensorSpec or nested structure of TensorSpec objects, describing how the layer would transform the provided input. 
Raises: TypeError: If input_signature contains a non-TensorSpec object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:compute_output_signature arg:self arg:input_signature arguments arg arg FunctionDef name:check_type_return_shape arg:s arguments arg If Call Raise Call Call Return return:yes Assign Call Assign Call Assign If Compare Assign Call Assign Return return:yes Call arguments arg Call" + }, + { + "library": "pytorch", + "name": "_move_states_to_device", + "source_code": "def _move_states_to_device(params: list[nn.Parameter], buffers: list[torch.Tensor], device_from_device_id: Optional[torch.device]) -> None:\n if len(params) == 0 and len(buffers) == 0:\n return\n if len(params) > 0:\n current_device = params[0].device\n elif len(buffers) > 0:\n current_device = buffers[0].device\n cpu_device = torch.device('cpu')\n if device_from_device_id is not None:\n for param in params:\n with torch.no_grad():\n param.data = param.to(device_from_device_id)\n if param.grad is not None:\n param.grad.data = param.grad.to(device_from_device_id)\n for buffer in buffers:\n buffer.data = buffer.to(device_from_device_id)\n elif current_device == cpu_device:\n _warn_cpu_init()", + "docstring": "Move states to the specified device. Precondition: `` and module's parameters and buffers have been materialized if needed.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py", + "ast_data": "FunctionDef name:_move_states_to_device arg:params arg:buffers arg:device_from_device_id arguments arg arg arg If BoolOp Compare Call Compare Call Return return:no If Compare Call Assign If Compare Call Assign Assign Call If Compare For With Call Assign Call If Compare Assign Call For Assign Call If Compare Call" + }, + { + "library": "authlib", + "name": "register_client_auth_method", + "source_code": "def register_client_auth_method(self, auth):\n if isinstance(auth, tuple):\n self._auth_methods[auth[0]] = auth[1]\n else:\n self._auth_methods[auth.name] = auth", + "docstring": "Extend client authenticate for token endpoint. 
:param auth: an instance to sign the request", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\client.py", + "ast_data": "FunctionDef name:register_client_auth_method arg:self arg:auth arguments arg arg If Call Assign Assign" + }, + { + "library": "django", + "name": "data", + "source_code": "@property\ndef data(self):\n return self.form._widget_data_value(self.field.widget, self.html_name)", + "docstring": "Return the data for this BoundField, or None if it wasn't given.", + "type": "method", + "file_path": "django\\django\\forms\\boundfield.py", + "ast_data": "FunctionDef name:data arg:self arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "get_info", + "source_code": "def get_info(self, notfound_action=0):\n flag = 0\n if not self.has_info():\n flag = 1\n log.info(self.__class__.__name__ + ':')\n if hasattr(self, 'calc_info'):\n self.calc_info()\n if notfound_action:\n if not self.has_info():\n if notfound_action == 1:\n warnings.warn(self.notfounderror.__doc__, stacklevel=2)\n elif notfound_action == 2:\n raise self.notfounderror(self.notfounderror.__doc__)\n else:\n raise ValueError(repr(notfound_action))\n if not self.has_info():\n log.info(' NOT AVAILABLE')\n self.set_info()\n else:\n log.info(' FOUND:')\n res = self.saved_results.get(self.__class__.__name__)\n if log.get_threshold() <= log.INFO and flag:\n for k, v in res.items():\n v = str(v)\n if k in ['sources', 'libraries'] and len(v) > 270:\n v = v[:120] + '...\\n...\\n...' + v[-120:]\n log.info(' %s = %s', k, v)\n log.info('')\n return copy.deepcopy(res)", + "docstring": "Return a dictionary with items that are compatible with numpy.distutils.setup keyword arguments.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\system_info.py", + "ast_data": "FunctionDef name:get_info arg:self arg:notfound_action arguments arg arg Assign If Call Assign Call If Call Call If If Call If Compare Call If Compare Raise Call Raise Call Call If Call Call Call Call Assign Call If BoolOp Compare Call For Call Assign Call If BoolOp Compare Compare Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "dtensor_reduce", + "source_code": "def dtensor_reduce(strategy, reduce_op, value, axis):\n distribute_lib._require_cross_replica_or_default_context_extended(strategy.extended)\n if isinstance(reduce_op, str):\n reduce_op = reduce_util.ReduceOp(reduce_op.upper())\n distributed_input = is_distributed_value(value)\n if not distributed_input and axis is None:\n destinations = device_util.current() or strategy.extended._default_device or '/device:CPU:0'\n devices = cross_device_ops_lib.get_devices_from(destinations)\n with ops.device(devices[0]):\n return array_ops.identity(cross_device_ops_lib.reduce_non_distributed_value(reduce_op, value, destinations, strategy.num_replicas_in_sync))\n value = convert_inputs_to_dtensor(value, strategy._mesh)\n if reduce_op == reduce_util.ReduceOp.MEAN:\n reduce_op = math_ops.reduce_mean\n else:\n reduce_op = math_ops.reduce_sum\n if d_api.fetch_layout(value).is_fully_replicated():\n if axis is not None:\n value = reduce_op(value, axis=axis)\n else:\n new_shape = [strategy.num_replicas_in_sync, -1]\n if len(value.shape) > 1:\n new_shape.extend(array_ops.shape(value)[1:])\n value = array_ops.reshape(value, new_shape)\n if axis is not None:\n value = reduce_op(value, axis=axis + 1)\n value = reduce_op(value, axis=0)\n return value", + "docstring": "Implement dtensor based strategy.reduce().", + "type": "function", + "file_path": 
"tensorflow\\tensorflow\\python\\distribute\\experimental\\dtensor_util.py", + "ast_data": "FunctionDef name:dtensor_reduce arg:strategy arg:reduce_op arg:value arg:axis arguments arg arg arg arg Call If Call Assign Call Call Assign Call If BoolOp Compare Assign BoolOp Call Assign Call With Call Return return:yes Call Call Assign Call If Compare Assign Assign If Call Call If Compare Assign Call Assign If Compare Call Call Call Assign Call If Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "matrix_diag_transform", + "source_code": "def matrix_diag_transform(matrix, transform=None, name=None):\n with ops.name_scope(name, 'matrix_diag_transform', [matrix]):\n matrix = ops.convert_to_tensor(matrix, name='matrix')\n if transform is None:\n return matrix\n diag = array_ops.matrix_diag_part(matrix)\n transformed_diag = transform(diag)\n transformed_mat = array_ops.matrix_set_diag(matrix, transformed_diag)\n return transformed_mat", + "docstring": "Transform diagonal of [batch-]matrix, leave rest of matrix unchanged. Create a trainable covariance defined by a Cholesky factor: Example of heteroskedastic 2-D linear regression. Args: matrix: Rank , , where the last two dimensions are equal. transform: Element-wise function mapping to . To be applied to the diagonal of . If , is returned unchanged. Defaults to . name: A name to give created ops. Defaults to \"matrix_diag_transform\". Returns: A with same shape and as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py", + "ast_data": "FunctionDef name:matrix_diag_transform arg:matrix arg:transform arg:name arguments arg arg arg With Call Assign Call If Compare Return return:yes Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "__init__", + "source_code": "def __init__(self, sk, yk):\n if sk.shape != yk.shape or sk.ndim != 2:\n raise ValueError('sk and yk must have matching shape, (n_corrs, n)')\n n_corrs, n = sk.shape\n super().__init__(dtype=np.float64, shape=(n, n))\n self.sk = sk\n self.yk = yk\n self.n_corrs = n_corrs\n self.rho = 1 / np.einsum('ij,ij->i', sk, yk)", + "docstring": "Construct the operator.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_lbfgsb_py.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:sk arg:yk arguments arg arg arg If BoolOp Compare Compare Raise Call Assign Call Call Assign Assign Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "make_test_function", + "source_code": "def make_test_function(self):\n if self.test_function is not None:\n return self.test_function\n\n def step_function(model, iterator):\n\n def run_step(data):\n outputs = model.test_step(data)\n with ops.control_dependencies(_minimum_control_deps(outputs)):\n model._test_counter.assign_add(1)\n return outputs\n data = next(iterator)\n outputs = model.distribute_strategy.run(run_step, args=(data,))\n outputs = reduce_per_replica(outputs, self.distribute_strategy, reduction='first')\n return outputs\n if self._steps_per_execution.numpy().item() == 1:\n\n def test_function(iterator):\n return step_function(self, iterator)\n else:\n\n def test_function(iterator):\n for _ in math_ops.range(self._steps_per_execution):\n outputs = step_function(self, iterator)\n return outputs\n if not self.run_eagerly:\n test_function = def_function.function(test_function, experimental_relax_shapes=True)\n self.test_function = test_function\n if self._cluster_coordinator:\n self.test_function = 
lambda iterator: self._cluster_coordinator.schedule(test_function, args=(iterator,))\n return self.test_function", + "docstring": "Creates a function that executes one step of evaluation. This method can be overridden to support custom evaluation logic. This method is called by and . Typically, this method directly controls and settings, and delegates the actual evaluation logic to . This function is cached the first time or is called. The cache is cleared whenever is called. Returns: Function. The function created by this method should accept a , and return a containing values that will be passed to .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:make_test_function arg:self arguments arg If Compare Return return:yes FunctionDef name:step_function arg:model arg:iterator arguments arg arg FunctionDef name:run_step arg:data arguments arg Assign Call With Call Call Call Return return:yes Assign Call Assign Call Assign Call Return return:yes If Compare Call Call FunctionDef name:test_function arg:iterator arguments arg Return return:yes Call FunctionDef name:test_function arg:iterator arguments arg For Call Assign Call Return return:yes If Assign Call Assign If Assign arguments arg Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "variable_shape", + "source_code": "@property\ndef variable_shape(self):\n return tensor_shape.TensorShape([self.shared_embedding_column_creator.dimension])", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:variable_shape arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_from_components", + "source_code": "@abc.abstractmethod\ndef _from_components(self, components):\n raise NotImplementedError('%s._from_components()' % type(self).__name__)", + "docstring": "Reconstructs a value from a nested structure of Tensor/CompositeTensor. Args: components: A nested structure of or , compatible with . (Caller is responsible for ensuring compatibility.) 
Returns: A value that is compatible with this .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", + "ast_data": "FunctionDef name:_from_components arg:self arg:components arguments arg arg Raise Call Call" + }, + { + "library": "pytorch", + "name": "parse_dims", + "source_code": "@classmethod\ndef parse_dims(cls, input_dims: list[str], output_dim: str) -> 'EinsumDims':\n dim_char_set: set[str] = set()\n for input_dim in input_dims:\n dim_char_set.update(input_dim)\n all_dim_chars = sorted(dim_char_set)\n lhs_out_only_dims, rhs_out_only_dims = ([], [])\n batch_dims, contracting_dims = ([], [])\n for dim_char in all_dim_chars:\n if dim_char not in output_dim:\n contracting_dims.append(dim_char)\n else:\n is_batch_dim = True\n for input_dim in input_dims:\n is_batch_dim = is_batch_dim and dim_char in input_dim\n if is_batch_dim:\n batch_dims.append(dim_char)\n else:\n assert len(input_dims) == 2, 'free dimension only supported for two inputs!'\n lhs, rhs = input_dims\n if dim_char in lhs:\n lhs_out_only_dims.append(dim_char)\n elif dim_char in rhs:\n rhs_out_only_dims.append(dim_char)\n else:\n raise RuntimeError('Invalid dimension character')\n return cls(contracting_dims=contracting_dims, batch_dims=batch_dims, lhs_out_only_dims=lhs_out_only_dims, rhs_out_only_dims=rhs_out_only_dims)", + "docstring": "Parse the dims and extract the contracting, batch, and free dimensions for the left and right hand sides.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_einsum_strategy.py", + "ast_data": "FunctionDef name:parse_dims arg:cls arg:input_dims arg:output_dim arguments arg arg arg Call For Call Assign Call Assign Assign For If Compare Call Assign For Assign BoolOp Compare If Call Compare Call Assign If Compare Call If Compare Call Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "indent_xml", + "source_code": "def indent_xml(elem, level=0) -> None:\n indent_str = '\\n' + level * ' '\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = indent_str + ' '\n if not elem.tail or not elem.tail.strip():\n elem.tail = indent_str\n for elem in elem:\n indent_xml(elem, level + 1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = indent_str\n elif level and (not elem.tail or not elem.tail.strip()):\n elem.tail = indent_str", + "docstring": "Indents and newlines the XML for better output.", + "type": "function", + "file_path": "tensorflow\\ci\\official\\utilities\\extract_resultstore_links.py", + "ast_data": "FunctionDef name:indent_xml arg:elem arg:level arguments arg arg Assign If Call If BoolOp Call Assign If BoolOp Call Assign For Call If BoolOp Call Assign If BoolOp BoolOp Call Assign" + }, + { + "library": "sphinx", + "name": "number_reference", + "source_code": "class number_reference(nodes.reference):\n pass", + "docstring": "Node for number references, similar to pending_xref.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:number_reference" + }, + { + "library": "pandas", + "name": "construct_1d_object_array_from_listlike", + "source_code": "def construct_1d_object_array_from_listlike(values: Collection) -> np.ndarray:\n return np.fromiter(values, dtype='object', count=len(values))", + "docstring": "Transform any list-like object in a 1-dimensional numpy array of object dtype. 
Parameters ---------- values : any iterable which has a len() Raises ------ TypeError * If does not have a len() Returns ------- 1-dimensional numpy array of dtype object", + "type": "function", + "file_path": "pandas\\pandas\\core\\dtypes\\cast.py", + "ast_data": "FunctionDef name:construct_1d_object_array_from_listlike arg:values arguments arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "amin", + "source_code": "@array_function_dispatch(_min_dispatcher)\ndef amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue):\n return _wrapreduction(a, np.minimum, 'min', axis, None, out, keepdims=keepdims, initial=initial, where=where)", + "docstring": "Return the minimum of an array or minimum along an axis. is an alias of . See Also -------- min : alias of this function ndarray.min : equivalent method", + "type": "function", + "file_path": "numpy\\numpy\\_core\\fromnumeric.py", + "ast_data": "FunctionDef name:amin arg:a arg:axis arg:out arg:keepdims arg:initial arg:where arguments arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "UnshardHandle", + "source_code": "class UnshardHandle:\n\n def wait(self) -> None:\n return", + "docstring": "A handle to wait on a :meth: op.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py", + "ast_data": "ClassDef name:UnshardHandle FunctionDef name:wait arg:self arguments arg Return return:no" + }, + { + "library": "scipy", + "name": "Rastrigin", + "source_code": "class Rastrigin(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))\n self.global_optimum = [[0 for _ in range(self.N)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n return 10.0 * self.N + sum(x ** 2.0 - 10.0 * cos(2.0 * pi * x))", + "docstring": "Rastrigin objective function. This class defines the Rastrigin [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Rastrigin}}(x) = 10n \\sum_{i=1}^n \\left[ x_i^2 - 10 \\cos(2\\pi x_i) \\right] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. 
Global Optimization Benchmarks and AMPGO retrieved 2015", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_R.py", + "ast_data": "ClassDef name:Rastrigin Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_plot", + "source_code": "def _plot(results, metrics, formats, title, x_ticks, x_label, format_markers=('x', '|', 'o', '+'), metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):\n fig = plt.figure('scikit-learn multilabel metrics benchmarks')\n plt.title(title)\n ax = fig.add_subplot(111)\n for i, metric in enumerate(metrics):\n for j, format in enumerate(formats):\n ax.plot(x_ticks, results[i, j].flat, label='{}, {}'.format(metric, format), marker=format_markers[j], color=metric_colors[i % len(metric_colors)])\n ax.set_xlabel(x_label)\n ax.set_ylabel('Time (s)')\n ax.legend()\n plt.show()", + "docstring": "Plot the results by metric, format and some other variable given by x_label", + "type": "function", + "file_path": "scikit-learn\\benchmarks\\bench_multilabel_metrics.py", + "ast_data": "FunctionDef name:_plot arg:results arg:metrics arg:formats arg:title arg:x_ticks arg:x_label arg:format_markers arg:metric_colors arguments arg arg arg arg arg arg arg arg Assign Call Call Assign Call For Call For Call Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_check_valid_event_ndims", + "source_code": "def _check_valid_event_ndims(self, min_event_ndims, event_ndims):\n event_ndims = ops.convert_to_tensor(event_ndims, name='event_ndims')\n event_ndims_ = tensor_util.constant_value(event_ndims)\n assertions = []\n if not event_ndims.dtype.is_integer:\n raise ValueError('Expected integer dtype, got dtype {}'.format(event_ndims.dtype))\n if event_ndims_ is not None:\n if event_ndims.shape.ndims != 0:\n raise ValueError('Expected scalar event_ndims, got shape {}'.format(event_ndims.shape))\n if min_event_ndims > event_ndims_:\n raise ValueError('event_ndims ({}) must be larger than min_event_ndims ({})'.format(event_ndims_, min_event_ndims))\n elif self.validate_args:\n assertions += [check_ops.assert_greater_equal(event_ndims, min_event_ndims)]\n if event_ndims.shape.is_fully_defined():\n if event_ndims.shape.ndims != 0:\n raise ValueError('Expected scalar shape, got ndims {}'.format(event_ndims.shape.ndims))\n elif self.validate_args:\n assertions += [check_ops.assert_rank(event_ndims, 0, message='Expected scalar.')]\n return assertions", + "docstring": "Check whether event_ndims is at least min_event_ndims.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py", + "ast_data": "FunctionDef name:_check_valid_event_ndims arg:self arg:min_event_ndims arg:event_ndims arguments arg arg arg Assign Call Assign Call Assign If Raise Call Call If Compare If Compare Raise Call Call If Compare Raise Call Call If Call If Call If Compare Raise Call Call If Call Return return:yes" + }, + { + "library": "django", + "name": "upload_interrupted", + "source_code": "def upload_interrupted(self):\n pass", + "docstring": "Signal that the upload was interrupted. 
Subclasses should perform cleanup that is necessary for this handler.", + "type": "method", + "file_path": "django\\django\\core\\files\\uploadhandler.py", + "ast_data": "FunctionDef name:upload_interrupted arg:self arguments arg" + }, + { + "library": "django", + "name": "wkb_size", + "source_code": "@property\ndef wkb_size(self):\n return capi.get_wkbsize(self.ptr)", + "docstring": "Return the size of the WKB buffer.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:wkb_size arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_set_random_states", + "source_code": "def _set_random_states(estimator, random_state=None):\n random_state = check_random_state(random_state)\n to_set = {}\n for key in sorted(estimator.get_params(deep=True)):\n if key == 'random_state' or key.endswith('__random_state'):\n to_set[key] = random_state.randint(np.iinfo(np.int32).max)\n if to_set:\n estimator.set_params(**to_set)", + "docstring": "Set fixed random_state parameters for an estimator. Finds all parameters ending `Glossary ` rvs", + "type": "function", + "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py", + "ast_data": "FunctionDef name:_set_random_states arg:estimator arg:random_state arguments arg arg Assign Call Assign For Call Call If BoolOp Compare Call Assign Call Call If Call" + }, + { + "library": "tensorflow", + "name": "_apply_fn", + "source_code": "def _apply_fn(dataset):\n return dataset.rejection_resample(class_func=class_func, target_dist=target_dist, initial_dist=initial_dist, seed=seed)", + "docstring": "Function from to that applies the transformation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\resampling.py", + "ast_data": "FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_snapshot_streams", + "source_code": "def _snapshot_streams(self, path) -> Iterable[_pywrap_server_lib.SnapshotStreamInfoWrapper]:\n return self._server.snapshot_streams(path)", + "docstring": "Returns information about all the streams for a snapshot.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py", + "ast_data": "FunctionDef name:_snapshot_streams arg:self arg:path arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "to_native", + "source_code": "def to_native(A):\n dt = A.dtype\n if dt.isnative:\n return A\n return np.asarray(A, dtype=dt.newbyteorder('native'))", + "docstring": "Ensure that the data type of the NumPy array has native byte order. must be a NumPy array. If the data type of does not have native byte order, a copy of with a native byte order is returned. Otherwise is returned.", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_sputils.py", + "ast_data": "FunctionDef name:to_native arg:A arguments arg Assign If Return return:yes Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "printoptions", + "source_code": "@set_module('numpy')\n@contextlib.contextmanager\ndef printoptions(*args, **kwargs):\n token = _set_printoptions(*args, **kwargs)\n try:\n yield get_printoptions()\n finally:\n format_options.reset(token)", + "docstring": "Context manager for setting print options. Set print options for the scope of the block, and restore the old options at the end. See for the full description of available options. 
Examples -------- >>> import numpy as np >>> from numpy.testing import assert_equal >>> with np.printoptions(precision=2): ... np.array([2.0]) / 3 array([0.67]) The -clause of the -statement gives the current print options: >>> with np.printoptions(precision=2) as opts: ... assert_equal(opts, np.get_printoptions()) See Also -------- set_printoptions, get_printoptions", + "type": "function", + "file_path": "numpy\\numpy\\_core\\arrayprint.py", + "ast_data": "FunctionDef name:printoptions arguments arg arg Assign Call Try Call Call Call" + }, + { + "library": "pytorch", + "name": "all_node_args_except_first", + "source_code": "def all_node_args_except_first(node: Node) -> list[int]:\n return list(range(1, len(node.args)))", + "docstring": "Returns all node arg indices after first", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py", + "ast_data": "FunctionDef name:all_node_args_except_first arg:node arguments arg Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "_get_offsets_buffer", + "source_code": "def _get_offsets_buffer(self) -> tuple[PandasBuffer, Any]:\n if self.dtype[0] == DtypeKind.STRING:\n values = self._col.to_numpy()\n ptr = 0\n offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64)\n for i, v in enumerate(values):\n if isinstance(v, str):\n b = v.encode(encoding='utf-8')\n ptr += len(b)\n offsets[i + 1] = ptr\n buffer = PandasBuffer(offsets)\n dtype = (DtypeKind.INT, 64, ArrowCTypes.INT64, Endianness.NATIVE)\n else:\n raise NoBufferPresent('This column has a fixed-length dtype so it does not have an offsets buffer')\n return (buffer, dtype)", + "docstring": "Return the buffer containing the offset values for variable-size binary data (e.g., variable-length strings) and the buffer's associated dtype. Raises NoBufferPresent if the data buffer does not have an associated offsets buffer.", + "type": "method", + "file_path": "pandas\\pandas\\core\\interchange\\column.py", + "ast_data": "FunctionDef name:_get_offsets_buffer arg:self arguments arg If Compare Assign Call Assign Assign Call Call For Call If Call Assign Call Call Assign Assign Call Assign Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, filenames, compression_type=None, buffer_size=None, name=None):\n self._filenames = filenames\n self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string)\n self._buffer_size = convert.optional_param_to_tensor('buffer_size', buffer_size, argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES)\n self._name = name\n variant_tensor = gen_dataset_ops.text_line_dataset(self._filenames, self._compression_type, self._buffer_size, metadata=self._metadata.SerializeToString())\n super(_TextLineDataset, self).__init__(variant_tensor)", + "docstring": "Creates a . Args: filenames: A tensor containing one or more filenames. compression_type: (Optional.) A scalar evaluating to one of (no compression), , or . buffer_size: (Optional.) A scalar denoting the number of bytes to buffer. A value of 0 results in the default buffering values chosen based on the compression type. name: (Optional.) 
A name for the tf.data operation.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:filenames arg:compression_type arg:buffer_size arg:name arguments arg arg arg arg arg Assign Assign Call Assign Call Assign Assign Call Call Call Call" + }, + { + "library": "pytorch", + "name": "release", + "source_code": "def release(self):\n if self.fd is not None:\n os.close(self.fd)\n os.remove(self.lock_file_path)", + "docstring": "Release the baton and removes its file.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\file_baton.py", + "ast_data": "FunctionDef name:release arg:self arguments arg If Compare Call Call" + }, + { + "library": "pytorch", + "name": "get_idx_from_placements", + "source_code": "def get_idx_from_placements(placements, current_rank) -> int:\n for idx, placement in enumerate(placements):\n if current_rank == placement.rank():\n return idx\n raise RuntimeError('current_rank not in the placement.')", + "docstring": "Return the position of the current rank in the given placements. Args: placements(List[Union[_remote_device, str]]): Specifies the placement of each shard of the Tensor. The size of the list represents the number of shards to be created. This could be a list of :class:'s. This list could also contain a string which represents remote device as accepted by :class: current_rank (int): number of current device. Returns: A int which contains the position of current device in the placement list.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\reshard.py", + "ast_data": "FunctionDef name:get_idx_from_placements arg:placements arg:current_rank arguments arg arg For Call If Compare Call Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "clone_inputs_retaining_gradness", + "source_code": "def clone_inputs_retaining_gradness(example_inputs):\n cloned_inputs = clone_inputs(example_inputs)\n for idx in range(len(example_inputs)):\n if isinstance(cloned_inputs[idx], torch.Tensor):\n cloned_inputs[idx].requires_grad_(example_inputs[idx].requires_grad)\n return cloned_inputs", + "docstring": "This clone inputs is different from utils clone_input. In case of minifier, all the tensors are leaf tensors while creating a new graph. So, we set the requires_grad field w/o checking the leafness of the tensor.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\debug_utils.py", + "ast_data": "FunctionDef name:clone_inputs_retaining_gradness arg:example_inputs arguments arg Assign Call For Call Call If Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_list_to_string", + "source_code": "def _list_to_string(l, s):\n return s.join(l)", + "docstring": "Concatenates list items into a single string separated by . Args: l: List with items to be concatenated into a single string. s: String or char that will be concatenated in between each item. 
Returns: String that has all items in list concatenated with separator.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\compat_checker\\compat_checker.py", + "ast_data": "FunctionDef name:_list_to_string arg:l arg:s arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, verts, sizes=None, *, closed=True, **kwargs):\n super().__init__(**kwargs)\n self.set_sizes(sizes)\n self.set_verts(verts, closed)\n self.stale = True", + "docstring": "Parameters ---------- verts : list of array-like The sequence of polygons [*verts0*, *verts1*, ...] where each element *verts_i* defines the vertices of polygon *i* as a 2D array-like of shape (M, 2). sizes : array-like, default: None Squared scaling factors for the polygons. The coordinates of each polygon *verts_i* are multiplied by the square-root of the corresponding entry in *sizes* (i.e., *sizes* specify the scaling of areas). The scaling is applied before the Artist master transform. closed : bool, default: True Whether the polygon should be closed by adding a CLOSEPOLY connection at the end. **kwargs Forwarded to .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:verts arg:sizes arguments arg arg arg arg arg Call Call Call Call Assign" + }, + { + "library": "tensorflow", + "name": "exceptions_raised", + "source_code": "@property\ndef exceptions_raised(self):\n return self._exceptions_raised", + "docstring": "Exceptions raised but not handled by the threads. Exceptions raised in queue runner threads are handled in one of two ways depending on whether or not a was passed to : * With a , exceptions are reported to the coordinator and forgotten by the . * Without a , exceptions are captured by the and made available in this property. Returns: A list of Python objects. The list is empty if no exception was captured. 
(No exceptions are captured when using a Coordinator.)", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\queue_runner_impl.py", + "ast_data": "FunctionDef name:exceptions_raised arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_StatelessGammaGradAlpha", + "source_code": "def _StatelessGammaGradAlpha(shape, alpha, sample, grad):\n num_sample_dimensions = array_ops.shape(shape)[0] - array_ops.rank(alpha)\n alpha_broadcastable = add_leading_unit_dimensions(alpha, num_sample_dimensions)\n partial_a = gen_random_ops.random_gamma_grad(alpha_broadcastable, sample)\n return math_ops.reduce_sum(grad * partial_a, axis=math_ops.range(num_sample_dimensions))", + "docstring": "Returns gradients of a gamma sampler wrt alpha.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\random_grad.py", + "ast_data": "FunctionDef name:_StatelessGammaGradAlpha arg:shape arg:alpha arg:sample arg:grad arguments arg arg arg arg Assign Call Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "get_qconfig_info", + "source_code": "def get_qconfig_info(self, model) -> dict[str, DetectorQConfigInfo]:\n return {}", + "docstring": "Returns the DetectorQConfigInfo for each module_fqn relevant Args model (nn.Module or subclass): model to find observer insertion points Returns a Dict mapping from unique observer fqns (where we want to insert them) to: A DetectorQConfigInfo with the information to generate a QConfig for a specific module", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", + "ast_data": "FunctionDef name:get_qconfig_info arg:self arg:model arguments arg arg Return return:no" + }, + { + "library": "tensorflow", + "name": "recoverable", + "source_code": "def recoverable(self):\n state = self.state()\n symptoms = self.symptoms()\n if state and state in ['TERMINATED', 'PREEMPTED']:\n return False\n elif FLAGS.runtime_oom_exit and self._oom_event(symptoms):\n return False\n elif FLAGS.hbm_oom_exit and self._hbm_oom_event(symptoms):\n return False\n return True", + "docstring": "Returns true if the TPU is in a state where training should eventually resume. If false the TPU is in a unrecoverable state and should be recreated.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py", + "ast_data": "FunctionDef name:recoverable arg:self arguments arg Assign Call Assign Call If BoolOp Compare Return return:yes If BoolOp Call Return return:yes If BoolOp Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "edges", + "source_code": "@property\ndef edges(self):\n return self._edges", + "docstring": "The default value of for newly added cells using . Notes ----- This setting does currently only affect newly created cells using . 
To change existing cells, you have to set their edges explicitly:: for c in tab.get_celld().values(): c.visible_edges = 'horizontal'", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\table.py", + "ast_data": "FunctionDef name:edges arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, queue, ev_writer, flush_secs, flush_complete, flush_sentinel, close_sentinel):\n threading.Thread.__init__(self, name='EventLoggerThread')\n self.daemon = True\n self._queue = queue\n self._ev_writer = ev_writer\n self._flush_secs = flush_secs\n self._next_event_flush_time = 0\n self._flush_complete = flush_complete\n self._flush_sentinel = flush_sentinel\n self._close_sentinel = close_sentinel\n self.failure_exc_info = ()", + "docstring": "Creates an _EventLoggerThread. Args: queue: A CloseableQueue from which to dequeue events. The queue will be closed just before the thread exits, whether due to or any exception raised in the writing loop. ev_writer: An event writer. Used to log brain events for the visualizer. flush_secs: How often, in seconds, to flush the pending file to disk. flush_complete: A threading.Event that will be set whenever a flush operation requested via has been completed. flush_sentinel: A sentinel element in queue that tells this thread to flush the writer and mark the current flush operation complete. close_sentinel: A sentinel element in queue that tells this thread to terminate and close the queue.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:queue arg:ev_writer arg:flush_secs arg:flush_complete arg:flush_sentinel arg:close_sentinel arguments arg arg arg arg arg arg arg Call Assign Assign Assign Assign Assign Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "_common_prefix", + "source_code": "def _common_prefix(self, m):\n if not m:\n return ''\n s1 = min(m)\n s2 = max(m)\n for i, c in enumerate(s1):\n if c != s2[i]:\n return s1[:i]\n return s1", + "docstring": "Given a list of str, returns the longest common prefix. Args: m: (list of str) A list of strings. Returns: (str) The longest common prefix.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", + "ast_data": "FunctionDef name:_common_prefix arg:self arg:m arguments arg arg If Return return:yes Assign Call Assign Call For Call If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "master_job", + "source_code": "def master_job(master, cluster_def):\n if master in _LOCAL_MASTERS:\n return None\n if not cluster_def or not cluster_def.job:\n return _DEFAULT_JOB_NAME\n job_names = set((job.name for job in cluster_def.job))\n if _DEFAULT_JOB_NAME in job_names:\n raise ValueError('Currently, tpu_worker is not an allowed job name.')\n if len(job_names) == 1:\n return cluster_def.job[0].name\n if len(job_names) == 2:\n if _DEFAULT_COORDINATOR_JOB_NAME in job_names:\n job_names.remove(_DEFAULT_COORDINATOR_JOB_NAME)\n return job_names.pop()\n raise ValueError('Could not infer TPU job name.')", + "docstring": "Returns the canonical job name to use to place TPU computations on. Args: master: A representing the TensorFlow master to use. cluster_def: A ClusterDef object describing the TPU cluster. Returns: A string containing the job name, or None if no job should be specified. 
Raises: ValueError: If the user needs to specify a tpu_job_name, because we are unable to infer the job name automatically, or if the user-specified job names are inappropriate.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_system_metadata.py", + "ast_data": "FunctionDef name:master_job arg:master arg:cluster_def arguments arg arg If Compare Return return:no If BoolOp Return return:yes Assign Call If Compare Raise Call If Compare Call Return return:yes If Compare Call If Compare Call Return return:yes Call Raise Call" + }, + { + "library": "scipy", + "name": "fun", + "source_code": "@property\ndef fun(self):\n if self._f is None:\n self._f = self._fun(self._x)\n return self._f", + "docstring": "Value of objective function at current iteration.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_trustregion.py", + "ast_data": "FunctionDef name:fun arg:self arguments arg If Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "close", + "source_code": "def close(self, death_sig: Optional[signal.Signals]=None, timeout: int=30) -> None:\n if not death_sig:\n death_sig = _get_default_signal()\n self._close(death_sig=death_sig, timeout=timeout)\n if self._stdout_tail:\n self._stdout_tail.stop()\n if self._stderr_tail:\n self._stderr_tail.stop()", + "docstring": "Terminates all processes managed by this context and cleans up any meta resources (e.g. redirect, error_file files). Args: death_sig: Death signal to terminate processes. timeout: Time to wait for processes to finish, if process is still alive after this time, it will be terminated via SIGKILL.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py", + "ast_data": "FunctionDef name:close arg:self arg:death_sig arg:timeout arguments arg arg arg If Assign Call Call If Call If Call" + }, + { + "library": "matplotlib", + "name": "get_pickradius", + "source_code": "def get_pickradius(self):\n return self._pickradius", + "docstring": "Return the depth of the axis used by the picker.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_pickradius arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, t_direction, t, f1, f2, *, where=None, interpolate=False, step=None, **kwargs):\n self.t_direction = t_direction\n self._interpolate = interpolate\n self._step = step\n verts = self._make_verts(t, f1, f2, where)\n super().__init__(verts, **kwargs)", + "docstring": "Parameters ---------- t_direction : {{'x', 'y'}} The axes on which the variable lies. - 'x': the curves are `.PolyCollection`. 
See Also -------- .Axes.fill_between, .Axes.fill_betweenx", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:t_direction arg:t arg:f1 arg:f2 arguments arg arg arg arg arg arg arg arg arg Assign Assign Assign Assign Call Call Call" + }, + { + "library": "scipy", + "name": "save_npz", + "source_code": "def save_npz(file, matrix, compressed=True):\n arrays_dict = {}\n if matrix.format in ('csc', 'csr', 'bsr'):\n arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr)\n elif matrix.format == 'dia':\n arrays_dict.update(offsets=matrix.offsets)\n elif matrix.format == 'coo':\n arrays_dict.update(row=matrix.row, col=matrix.col)\n else:\n msg = f'Save is not implemented for sparse matrix of format {matrix.format}.'\n raise NotImplementedError(msg)\n arrays_dict.update(format=matrix.format.encode('ascii'), shape=matrix.shape, data=matrix.data)\n if isinstance(matrix, sp.sparse.sparray):\n arrays_dict.update(_is_array=True)\n if compressed:\n np.savez_compressed(file, **arrays_dict)\n else:\n np.savez(file, **arrays_dict)", + "docstring": "Save a sparse matrix or array to a file using `` archive. Examples -------- Store sparse matrix to disk, and load it again: >>> import numpy as np >>> import scipy as sp >>> sparse_matrix = sp.sparse.csc_matrix([[0, 0, 3], [4, 0, 0]]) >>> sparse_matrix >>> sparse_matrix.toarray() array([[0, 0, 3], [4, 0, 0]], dtype=int64) >>> sp.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) >>> sparse_matrix = sp.sparse.load_npz('/tmp/sparse_matrix.npz') >>> sparse_matrix >>> sparse_matrix.toarray() array([[0, 0, 3], [4, 0, 0]], dtype=int64)", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_matrix_io.py", + "ast_data": "FunctionDef name:save_npz arg:file arg:matrix arg:compressed arguments arg arg arg Assign If Compare Call If Compare Call If Compare Call Assign Raise Call Call Call If Call Call If Call Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.interpd\ndef __init__(self, xy, width, height, *, angle=0.0, theta1=0.0, theta2=360.0, **kwargs):\n fill = kwargs.setdefault('fill', False)\n if fill:\n raise ValueError('Arc objects cannot be filled')\n super().__init__(xy, width, height, angle=angle, **kwargs)\n self.theta1 = theta1\n self.theta2 = theta2\n self._theta1, self._theta2, self._stretched_width, self._stretched_height = self._theta_stretch()\n self._path = Path.arc(self._theta1, self._theta2)", + "docstring": "Parameters ---------- xy : (float, float) The center of the ellipse. width : float The length of the horizontal axis. height : float The length of the vertical axis. angle : float Rotation of the ellipse in degrees (counterclockwise). theta1, theta2 : float, default: 0, 360 Starting and ending angles of the arc in degrees. These values are relative to *angle*, e.g. if *angle* = 45 and *theta1* = 90 the absolute starting angle is 135. Default *theta1* = 0, *theta2* = 360, i.e. a complete ellipse. The arc is drawn in the counterclockwise direction. Angles greater than or equal to 360, or smaller than 0, are represented by an equivalent angle in the range [0, 360), by taking the input value mod 360. Other Parameters ---------------- **kwargs : properties Most properties are supported as keyword arguments, except *fill* and *facecolor* because filling is not supported. 
%(Patch:kwdoc)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:xy arg:width arg:height arguments arg arg arg arg arg arg arg arg Assign Call If Raise Call Call Call Assign Assign Assign Call Assign Call" + }, + { + "library": "pandas", + "name": "_extended_gcd", + "source_code": "def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:\n s, old_s = (0, 1)\n t, old_t = (1, 0)\n r, old_r = (b, a)\n while r:\n quotient = old_r // r\n old_r, r = (r, old_r - quotient * r)\n old_s, s = (s, old_s - quotient * s)\n old_t, t = (t, old_t - quotient * t)\n return (old_r, old_s, old_t)", + "docstring": "Extended Euclidean algorithms to solve Bezout's identity: a*x + b*y = gcd(x, y) Finds one particular solution for x, y: s, t Returns: gcd, s, t", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\range.py", + "ast_data": "FunctionDef name:_extended_gcd arg:self arg:a arg:b arguments arg arg arg Assign Assign Assign While Assign Assign Assign Assign Return return:yes" + }, + { + "library": "django", + "name": "get_form_class", + "source_code": "def get_form_class(self):\n return self.form_class", + "docstring": "Return the form class to use.", + "type": "method", + "file_path": "django\\django\\views\\generic\\edit.py", + "ast_data": "FunctionDef name:get_form_class arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n raw_predictions = self.decision_function(X)\n if raw_predictions.ndim == 1:\n encoded_classes = (raw_predictions >= 0).astype(int)\n else:\n encoded_classes = np.argmax(raw_predictions, axis=1)\n return self.classes_[encoded_classes]", + "docstring": "Predict class for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``. Returns ------- y : ndarray of shape (n_samples,) The predicted values.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call If Compare Assign Call Compare Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_is_deprecated", + "source_code": "def _is_deprecated(func):\n closures = getattr(func, '__closure__', [])\n if closures is None:\n closures = []\n is_deprecated = 'deprecated' in ''.join([c.cell_contents for c in closures if isinstance(c.cell_contents, str)])\n return is_deprecated", + "docstring": "Helper to check if func is wrapped by our deprecated decorator", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\deprecation.py", + "ast_data": "FunctionDef name:_is_deprecated arg:func arguments arg Assign Call If Compare Assign Assign Compare Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "setup_context", + "source_code": "@staticmethod\ndef setup_context(ctx: Any, inputs: tuple[Any, ...], output: Any) -> Any:\n raise NotImplementedError('setup_context is not implemented.')", + "docstring": "There are two ways to define the forward pass of an autograd.Function. Either: 1. 
Override forward with the signature `torch.autograd.Function.forwardextending-autograd` for more details.", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\function.py", + "ast_data": "FunctionDef name:setup_context arg:ctx arg:inputs arg:output arguments arg arg arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "predict_proba", + "source_code": "@available_if(_final_estimator_has('predict_proba'))\ndef predict_proba(self, X, **params):\n with _raise_or_warn_if_not_fitted(self):\n Xt = X\n if not _routing_enabled():\n for _, name, transform in self._iter(with_final=False):\n Xt = transform.transform(Xt)\n return self.steps[-1][1].predict_proba(Xt, **params)\n routed_params = process_routing(self, 'predict_proba', **params)\n for _, name, transform in self._iter(with_final=False):\n Xt = transform.transform(Xt, **routed_params[name].transform)\n return self.steps[-1][1].predict_proba(Xt, **routed_params[self.steps[-1][0]].predict_proba)", + "docstring": "Transform the data, and apply with the final estimator. Call of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls method. Only valid if the final estimator implements . Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. **params : dict of str -> object - If (default): Parameters to the called at the end of all transformations in the pipeline. - If : Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 0.20 .. versionchanged:: 1.4 Parameters are now passed to the `enable_metadata_routing=TrueMetadata Routing User Guide predict_proba` on the final estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg arg With Call Assign If Call For Call Assign Call Return return:yes Call Assign Call For Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "update_state", + "source_code": "def update_state(self, y_true, y_pred, sample_weight=None):\n deps = []\n if not self._built:\n self._build(tensor_shape.TensorShape(y_pred.shape))\n if self.multi_label or self.label_weights is not None:\n shapes = [(y_true, ('N', 'L'))]\n if self.multi_label:\n shapes.extend([(self.true_positives, ('T', 'L')), (self.true_negatives, ('T', 'L')), (self.false_positives, ('T', 'L')), (self.false_negatives, ('T', 'L'))])\n if self.label_weights is not None:\n shapes.append((self.label_weights, ('L',)))\n deps = [check_ops.assert_shapes(shapes, message='Number of labels is not consistent.')]\n label_weights = None if self.multi_label else self.label_weights\n if self._from_logits:\n y_pred = activations.sigmoid(y_pred)\n with ops.control_dependencies(deps):\n return metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, self._thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, sample_weight=sample_weight, multi_label=self.multi_label, label_weights=label_weights)", + "docstring": "Accumulates confusion matrix statistics. Args: y_true: The ground truth values. 
y_pred: The predicted values. sample_weight: Optional weighting of each example. Defaults to 1. Can be a whose rank is either 0, or the same rank as , and must be broadcastable to . Returns: Update op.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "FunctionDef name:update_state arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Assign If Call Call If BoolOp Compare Assign If Call If Compare Call Assign Call Assign If Assign Call With Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_transform_prepacked_op", + "source_code": "def _transform_prepacked_op(gm: torch.fx.GraphModule, node: torch.fx.Node):\n assert isinstance(node.target, torch._ops.OpOverload)\n opname, args = (node.target._opname, node.args)\n op_f = None\n if opname == 'conv2d_clamp_run':\n op_f = torch.ops.aten.conv2d\n elif opname == 'linear_clamp_run':\n op_f = torch.ops.aten.linear\n else:\n raise RuntimeError(f'Invalid operator {opname}')\n assert isinstance(args[1], torch.fx.Node)\n so = get_script_object(gm, args[1])\n func_args = []\n func_args += [args[0]]\n func_args += so.unpack()[:2]\n if opname == 'conv2d_clamp_run':\n func_args += torch.ops.prepacked.unpack_prepacked_sizes_conv2d(so)[2:]\n op_res_node = gm.graph.call_function(op_f, tuple(func_args))\n return op_res_node", + "docstring": "Transformation for functions under prepacked namespace, where they share the same handling logic that [...]OpContext contains all parameters.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\passes\\replace_quantized_ops_with_standard_ops_pass.py", + "ast_data": "FunctionDef name:_transform_prepacked_op arg:gm arg:node arguments arg arg Call Assign Assign If Compare Assign If Compare Assign Raise Call Call Assign Call Assign Call If Compare Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_value", + "source_code": "@doc_controls.do_not_generate_docs\ndef get_value(x):\n if not tensor_util.is_tf_type(x):\n return x\n if context.executing_eagerly() or isinstance(x, ops.EagerTensor):\n return x.numpy()\n if not getattr(x, '_in_graph_mode', True):\n with context.eager_mode():\n return x.numpy()\n if ops.executing_eagerly_outside_functions():\n with ops.init_scope():\n return x.numpy()\n with x.graph.as_default():\n return x.eval(session=get_session((x,)))", + "docstring": "Returns the value of a variable. is the complement of , and provides a generic interface for reading from variables while abstracting away the differences between TensorFlow 1.x and 2.x semantics. {snippet} Args: x: input variable. 
Returns: A Numpy array.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:get_value arg:x arguments arg If Call Return return:yes If BoolOp Call Call Return return:yes Call If Call With Call Return return:yes Call If Call With Call Return return:yes Call With Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_copy_tensors_to_device", + "source_code": "def _copy_tensors_to_device(self, partitioned_tensors: Dict[str, Any]) -> Any:\n partitioned_device_tensors = {}\n for table_name in partitioned_tensors:\n partitioned_tensor = partitioned_tensors[table_name][0]\n row_pointers_unpadded_size = partitioned_tensors[table_name][1]\n ids_unpadded_size = partitioned_tensors[table_name][2]\n row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains = xla_ops.tpu_copy_with_dynamic_shape([partitioned_tensor.row_pointers, partitioned_tensor.sorted_sample_ids, partitioned_tensor.sorted_token_ids, partitioned_tensor.sorted_gains], [row_pointers_unpadded_size, ids_unpadded_size, ids_unpadded_size, ids_unpadded_size])\n row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains = xla_ops.tpu_annotate_tensors_with_dynamic_shape([row_pointers, sorted_sample_ids, sorted_token_ids, sorted_gains])\n partitioned_device_tensors[table_name] = PartitionedCsrFormatTensor(row_pointers=row_pointers, sorted_sample_ids=sorted_sample_ids, sorted_token_ids=sorted_token_ids, sorted_gains=sorted_gains, sample_count=partitioned_tensor.sample_count, num_minibatches_per_physical_sparse_core=partitioned_tensor.num_minibatches_per_physical_sparse_core)\n return partitioned_device_tensors", + "docstring": "Copy tensors to device.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", + "ast_data": "FunctionDef name:_copy_tensors_to_device arg:self arg:partitioned_tensors arguments arg arg Assign For Assign Assign Assign Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "load_state_dict_from_url", + "source_code": "def load_state_dict_from_url(url: str, model_dir: Optional[str]=None, map_location: MAP_LOCATION=None, progress: bool=True, check_hash: bool=False, file_name: Optional[str]=None, weights_only: bool=False) -> dict[str, Any]:\n if os.getenv('TORCH_MODEL_ZOO'):\n warnings.warn('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead')\n if model_dir is None:\n hub_dir = get_dir()\n model_dir = os.path.join(hub_dir, 'checkpoints')\n os.makedirs(model_dir, exist_ok=True)\n parts = urlparse(url)\n filename = os.path.basename(parts.path)\n if file_name is not None:\n filename = file_name\n cached_file = os.path.join(model_dir, filename)\n if not os.path.exists(cached_file):\n sys.stdout.write(f'Downloading: \"{url}\" to {cached_file}\\n')\n hash_prefix = None\n if check_hash:\n r = HASH_REGEX.search(filename)\n hash_prefix = r.group(1) if r else None\n download_url_to_file(url, cached_file, hash_prefix, progress=progress)\n if _is_legacy_zip_format(cached_file):\n return _legacy_zip_load(cached_file, model_dir, map_location, weights_only)\n return torch.load(cached_file, map_location=map_location, weights_only=weights_only)", + "docstring": "Loads the Torch serialized object at the given URL. If downloaded file is a zip file, it will be automatically decompressed. If the object is already present in , it's deserialized and returned. The default value of `~torch.hub.get_dir``~torch.load` for more details. 
Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB) >>> state_dict = torch.hub.load_state_dict_from_url( ... \" ... )", + "type": "function", + "file_path": "pytorch\\torch\\hub.py", + "ast_data": "FunctionDef name:load_state_dict_from_url arg:url arg:model_dir arg:map_location arg:progress arg:check_hash arg:file_name arg:weights_only arguments arg arg arg arg arg arg arg If Call Call If Compare Assign Call Assign Call Call Assign Call Assign Call If Compare Assign Assign Call If Call Call Assign If Assign Call Assign Call Call If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "many_to_many", + "source_code": "@cached_property\ndef many_to_many(self):\n return make_immutable_fields_list('many_to_many', (f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many))", + "docstring": "Return a list of all many to many fields on the model and its parents. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this list.", + "type": "method", + "file_path": "django\\django\\db\\models\\options.py", + "ast_data": "FunctionDef name:many_to_many arg:self arguments arg Return return:yes Call Call BoolOp" + }, + { + "library": "tensorflow", + "name": "_get", + "source_code": "def _get(self):\n with distribute_lib.enter_or_assert_strategy(self._distribute_strategy):\n return super(SyncOnReadVariable, self)._get()", + "docstring": "Returns the value of SyncOnReadVariable based on surrounding context. If called under a non-default replica-context, returns the corresponding variable on that replica. If called under default replica-context or cross-replica context, returns the synced value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "FunctionDef name:_get arg:self arguments arg With Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "any_not_none", + "source_code": "def any_not_none(*args) -> bool:\n return any((arg is not None for arg in args))", + "docstring": "Returns a boolean indicating if any argument is not None.", + "type": "function", + "file_path": "pandas\\pandas\\core\\common.py", + "ast_data": "FunctionDef name:any_not_none arguments arg Return return:yes Call Compare" + }, + { + "library": "tensorflow", + "name": "_ExtractInputShapes", + "source_code": "def _ExtractInputShapes(inputs):\n if context.executing_eagerly():\n return array_ops.shape_n(inputs)\n sizes = []\n fully_known = True\n for x in inputs:\n input_shape = array_ops.shape(x)\n if not isinstance(input_shape, tensor.Tensor) or input_shape.op.type != 'Const':\n fully_known = False\n break\n sizes.append(input_shape)\n if fully_known:\n return sizes\n else:\n return array_ops.shape_n(inputs)", + "docstring": "Extract the shapes of a set of input tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py", + "ast_data": "FunctionDef name:_ExtractInputShapes arg:inputs arguments arg If Call Return return:yes Call Assign Assign For Assign Call If BoolOp Call Compare Assign Call If Return return:yes Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_validate_column_callables", + "source_code": "def _validate_column_callables(self, X):\n all_columns = []\n transformer_to_input_indices = {}\n for name, _, columns in self.transformers:\n if callable(columns):\n columns = columns(X)\n all_columns.append(columns)\n 
transformer_to_input_indices[name] = _get_column_indices(X, columns)\n self._columns = all_columns\n self._transformer_to_input_indices = transformer_to_input_indices", + "docstring": "Converts callable column specifications. This stores a dictionary of the form and calls the on if is a callable for a given transformer. The results are then stored in .", + "type": "method", + "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py", + "ast_data": "FunctionDef name:_validate_column_callables arg:self arg:X arguments arg arg Assign Assign For If Call Assign Call Call Assign Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "read_var", + "source_code": "def read_var(self, replica_local_var):\n return array_ops.identity(replica_local_var)", + "docstring": "Read the aggregate value of a replica-local variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py", + "ast_data": "FunctionDef name:read_var arg:self arg:replica_local_var arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_add_op_node", + "source_code": "def _add_op_node(op, func, input_dict):\n func.node_def.extend([_get_node_def(op)])\n node_def = func.node_def[-1]\n for i in range(len(node_def.input)):\n if not node_def.input[i].startswith('^'):\n assert node_def.input[i] in input_dict, '%s missing from %s' % (node_def.input[i], input_dict.items())\n node_def.input[i] = input_dict[node_def.input[i]]\n if op.op_def is not None and op.op_def.is_stateful:\n func.signature.is_stateful = True", + "docstring": "Converts an op to a function def node and add it to .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_to_function_def.py", + "ast_data": "FunctionDef name:_add_op_node arg:op arg:func arg:input_dict arguments arg arg arg Call Call Assign For Call Call If Call Compare Call Assign If BoolOp Compare Assign" + }, + { + "library": "tensorflow", + "name": "_get_weighted_mean_squared_error", + "source_code": "def _get_weighted_mean_squared_error(self, quant_min, quant_max) -> tuple[float, float, float]:\n dequantized_hist_mids = self._get_dequantized_hist_mids_after_quantize(quant_min, quant_max)\n squared_error = (self._hist_mids - dequantized_hist_mids) ** 2\n weighted_error = np.sum(squared_error * self._hist_freq)\n return (weighted_error, quant_min, quant_max)", + "docstring": "Gets mean squared error between hist_mids and dequantized hist_mids. Quantization converts the range of numbers from [quant_min, quant_max] to [0, 2^num_bits - 1]. Values less than quant_min are converted to 0, and values greater than quant_max are converted to 2^num_bits - 1. Args: quant_min: The minimum real value that can be represented by a quantized value. quant_max: The maximum real value that can be represented by a quantized value. Returns: (error, quant_min, quant_max): Tuple of weighted mean squared error. 
error = (hist_mids - dequantized_hist_mids)**2 * hist_freq", + "type": "method", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py", + "ast_data": "FunctionDef name:_get_weighted_mean_squared_error arg:self arg:quant_min arg:quant_max arguments arg arg arg Assign Call Assign Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "register", + "source_code": "def register(self, name):\n\n def wrapper(writer_cls):\n self._registered[name] = writer_cls\n return writer_cls\n return wrapper", + "docstring": "Decorator for registering a class under a name. Example use:: @registry.register(name) class Foo: pass", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\animation.py", + "ast_data": "FunctionDef name:register arg:self arg:name arguments arg arg FunctionDef name:wrapper arg:writer_cls arguments arg Assign Return return:yes Return return:yes" + }, + { + "library": "sphinx", + "name": "set_application", + "source_code": "def set_application(self, app: Sphinx) -> None:\n self._app = app\n self.config = app.config\n self.env = app.env", + "docstring": "set_application will be called from Sphinx to set app and other instance variables :param sphinx.application.Sphinx app: Sphinx application object", + "type": "method", + "file_path": "sphinx\\sphinx\\parsers.py", + "ast_data": "FunctionDef name:set_application arg:self arg:app arguments arg arg Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "get_current_name_scope", + "source_code": "@tf_export('get_current_name_scope', v1=[])\ndef get_current_name_scope() -> str:\n ctx = context.context()\n if ctx.executing_eagerly():\n return ctx.scope_name.rstrip('/')\n else:\n return get_default_graph().get_name_scope()", + "docstring": "Returns current full name scope specified by s. For example, In other words, returns the op name prefix that will be prepended to, if an op is created at that place. Note that resets the name scope stack as shown below.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:get_current_name_scope arguments Assign Call If Call Return return:yes Call Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "get_library_dirs", + "source_code": "def get_library_dirs(self):\n return self.library_dirs[:]", + "docstring": "List of compiler library directories.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py", + "ast_data": "FunctionDef name:get_library_dirs arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_clim", + "source_code": "def set_clim(self, vmin=None, vmax=None):\n self._colorizer.set_clim(vmin, vmax)", + "docstring": "Set the norm limits for image scaling. Parameters ---------- vmin, vmax : float The limits. For scalar data, the limits may also be passed as a tuple (*vmin*, *vmax*) as a single positional argument. .. 
ACCEPTS: (vmin: float, vmax: float)", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py", + "ast_data": "FunctionDef name:set_clim arg:self arg:vmin arg:vmax arguments arg arg arg Call" + }, + { + "library": "matplotlib", + "name": "process_figure_for_rasterizing", + "source_code": "def process_figure_for_rasterizing(fig, bbox_inches_restore, renderer, fixed_dpi=None):\n bbox_inches, restore_bbox = bbox_inches_restore\n restore_bbox()\n r = adjust_bbox(fig, bbox_inches, renderer, fixed_dpi)\n return (bbox_inches, r)", + "docstring": "A function that needs to be called when figure dpi changes during the drawing (e.g., rasterizing). It recovers the bbox and re-adjust it with the new dpi.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\_tight_bbox.py", + "ast_data": "FunctionDef name:process_figure_for_rasterizing arg:fig arg:bbox_inches_restore arg:renderer arg:fixed_dpi arguments arg arg arg arg Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "add_dtype_config", + "source_code": "def add_dtype_config(self, dtype_config: DTypeConfig) -> BackendPatternConfig:\n self.dtype_configs.append(dtype_config)\n return self", + "docstring": "Add a set of supported data types passed as arguments to quantize ops in the reference model spec.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py", + "ast_data": "FunctionDef name:add_dtype_config arg:self arg:dtype_config arguments arg arg Call Return return:yes" + }, + { + "library": "pytorch", + "name": "summary", + "source_code": "@no_type_check\ndef summary(self, top: int=20) -> None:\n op_diff: dict[str, float] = defaultdict(float)\n op_name, previous_allocated_memory = self.memories_allocated[0]\n for i in range(1, self._op_index):\n op_name, current_allocated_memory = self.memories_allocated[i]\n op_diff[op_name] = current_allocated_memory - previous_allocated_memory\n previous_allocated_memory = current_allocated_memory\n print('------------------------------------------------')\n print(f'The number of cuda retries are: {self._num_cuda_retries}')\n print(f'Top {top} ops that generates memory are:')\n for k, v in sorted(op_diff.items(), key=operator.itemgetter(1), reverse=True)[:top]:\n print(f'{k}: {v}MB')\n print('------------------------------------------------')", + "docstring": "Print out the top operators that generate the most memories. 
The number of the top operators can be configured.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_tools\\memory_tracker.py", + "ast_data": "FunctionDef name:summary arg:self arg:top arguments arg arg Call Assign For Call Assign Assign Assign Call Call Call For Call Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "add", + "source_code": "def add(self, a):\n if a not in self._axes:\n self._axes[a] = next(self._counter)", + "docstring": "Add an Axes to the stack, ignoring it if already present.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:add arg:self arg:a arguments arg arg If Compare Assign Call" + }, + { + "library": "pandas", + "name": "_get_data_and_dtype_name", + "source_code": "def _get_data_and_dtype_name(data: ArrayLike):\n if isinstance(data, Categorical):\n data = data.codes\n if isinstance(data.dtype, DatetimeTZDtype):\n dtype_name = f'datetime64[{data.dtype.unit}]'\n else:\n dtype_name = data.dtype.name\n if data.dtype.kind in 'mM':\n data = np.asarray(data.view('i8'))\n elif isinstance(data, PeriodIndex):\n data = data.asi8\n data = np.asarray(data)\n return (data, dtype_name)", + "docstring": "Convert the passed data into a storable form and a dtype string.", + "type": "function", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:_get_data_and_dtype_name arg:data arguments arg If Call Assign If Call Assign Assign If Compare Assign Call Call If Call Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "enclosing_tpu_context_and_graph", + "source_code": "def enclosing_tpu_context_and_graph():\n graph = ops.get_default_graph()\n while graph is not None:\n ctx = graph._get_control_flow_context()\n while ctx is not None:\n if isinstance(ctx, tpu_replication.TPUReplicateContext):\n return (ctx, graph)\n ctx = ctx.outer_context\n graph = getattr(graph, 'outer_graph', None)\n return (None, None)", + "docstring": "Returns the TPUReplicateContext which exists inside a tpu.rewrite(), and its associated graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_util.py", + "ast_data": "FunctionDef name:enclosing_tpu_context_and_graph arguments Assign Call While Compare Assign Call While Compare If Call Return return:yes Assign Assign Call Return return:no" + }, + { + "library": "tensorflow", + "name": "average_pooling3d", + "source_code": "def average_pooling3d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None):\n warnings.warn('`tf.layers.average_pooling3d` is deprecated and will be removed in a future version. Please use `tf.keras.layers.AveragePooling3D` instead.')\n layer = AveragePooling3D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name)\n return layer.apply(inputs)", + "docstring": "Average pooling layer for 3D inputs (e.g. volumes). Args: inputs: The tensor over which to pool. Must have rank 5. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. 
The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. Returns: Output tensor. Raises: ValueError: if eager execution is enabled.", "type": "function", "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py", "ast_data": "FunctionDef name:average_pooling3d arg:inputs arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg Call Assign Call Return return:yes Call" }, { "library": "pytorch", "name": "valid_torch_name", "source_code": "def valid_torch_name(torch_name: TorchName | str) -> bool:\n return torch_name in _TORCH_NAME_TO_SCALAR_TYPE", "docstring": "Return whether the given torch name is a valid torch type name.", "type": "function", "file_path": "pytorch\\torch\\onnx\\_type_utils.py", "ast_data": "FunctionDef name:valid_torch_name arg:torch_name arguments arg Return return:yes Compare" }, { "library": "tensorflow", "name": "_signature_types", "source_code": "def _signature_types(self):\n if self._parameters.trace_mode in set([tensor_tracer_flags.TRACE_MODE_NAN_INF, tensor_tracer_flags.TRACE_MODE_NORM, tensor_tracer_flags.TRACE_MODE_HISTORY, tensor_tracer_flags.TRACE_MODE_MAX_ABS]):\n return {self._parameters.trace_mode: 0}\n if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:\n return self._parameters.summary_signatures\n return {}", "docstring": "Returns a dictionary holding the order of signatures in the cache for the selected trace mode.", "type": "method", "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", "ast_data": "FunctionDef name:_signature_types arg:self arguments arg If Compare Call Return return:yes If Compare Return return:yes Return return:no" }, { "library": "scikit-learn", "name": "_is_numpy_namespace", "source_code": "def _is_numpy_namespace(xp):\n return xp.__name__ in _NUMPY_NAMESPACE_NAMES", "docstring": "Return True if xp is backed by NumPy.", "type": "function", "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py", "ast_data": "FunctionDef name:_is_numpy_namespace arg:xp arguments arg Return return:yes Compare" }, { "library": "tensorflow", "name": "__init__", "source_code": "def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='hinge'):\n super().__init__(hinge, name=name, reduction=reduction)", "docstring": "Initializes `Hinge` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. 
Defaults to 'hinge'.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:reduction arg:name arguments arg arg arg Call Call" + }, + { + "library": "pytorch", + "name": "_rename_constants_nodes", + "source_code": "def _rename_constants_nodes(gm: torch.fx.GraphModule, graph_signature: ExportGraphSignature) -> None:\n node_names = {node.name for node in gm.graph.nodes}\n\n def rename_constant(name):\n if name in node_names:\n n = 1\n while (dup_name := f'{name}_{n}') in node_names:\n n += 1\n name = dup_name\n node_names.add(name)\n return name\n buffer_prefix = placeholder_prefixes[InputKind.BUFFER]\n const_prefix = placeholder_prefixes[InputKind.CONSTANT_TENSOR]\n buffer_to_constant = {}\n for spec in graph_signature.input_specs:\n if spec.kind == InputKind.CONSTANT_TENSOR and (not spec.arg.name.startswith(const_prefix)):\n if spec.arg.name.startswith(buffer_prefix):\n c_name = rename_constant(const_prefix + spec.arg.name[len(buffer_prefix):])\n else:\n c_name = rename_constant(const_prefix + spec.arg.name)\n buffer_to_constant[spec.arg.name] = c_name\n spec.arg.name = c_name\n for spec in graph_signature.output_specs:\n if spec.arg.name in buffer_to_constant:\n spec.arg.name = buffer_to_constant[spec.arg.name]\n for mod in gm.modules():\n if not isinstance(mod, torch.fx.GraphModule):\n continue\n for node in mod.graph.nodes:\n if node.name in buffer_to_constant:\n node.name = node.target = buffer_to_constant[node.name]\n mod.recompile()", + "docstring": "For strict mode, rename constants nodes that were previously annotated as buffers.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_trace.py", + "ast_data": "FunctionDef name:_rename_constants_nodes arg:gm arg:graph_signature arguments arg arg Assign FunctionDef name:rename_constant arg:name arguments arg If Compare Assign While Compare Assign Call Return return:yes Assign Assign Assign For If BoolOp Compare Call If Call Assign Call Call Assign Call Assign Assign For If Compare Assign For Call If Call For If Compare Assign Call" + }, + { + "library": "scipy", + "name": "_matstruct_to_dict", + "source_code": "def _matstruct_to_dict(matobj):\n d = {}\n for f in matobj._fieldnames:\n elem = matobj.__dict__[f]\n if isinstance(elem, mat_struct):\n d[f] = _matstruct_to_dict(elem)\n elif _has_struct(elem):\n d[f] = _inspect_cell_array(elem)\n else:\n d[f] = elem\n return d", + "docstring": "Construct nested dicts from mat_struct objects.", + "type": "function", + "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py", + "ast_data": "FunctionDef name:_matstruct_to_dict arg:matobj arguments arg Assign For Assign If Call Assign Call If Call Assign Call Assign Return return:yes" + }, + { + "library": "sphinx", + "name": "build_toc", + "source_code": "def build_toc(self) -> None:\n logger.info(__('writing toc.ncx file...'))\n if self.config.epub_tocscope == 'default':\n doctree = self.env.get_and_resolve_doctree(self.config.root_doc, self, prune_toctrees=False, includehidden=False)\n refnodes = self.get_refnodes(doctree, [])\n self.toc_add_files(refnodes)\n else:\n refnodes = self.refnodes\n self.check_refnodes(refnodes)\n navpoints = self.build_navpoints(refnodes)\n level = max((item['level'] for item in self.refnodes))\n level = min(level, self.config.epub_tocdepth)\n copy_asset_file(self.template_dir / 'toc.ncx.jinja', self.outdir, context=self.toc_metadata(level, navpoints), force=True)", + "docstring": "Write the metainfo file toc.ncx.", + 
"type": "method", + "file_path": "sphinx\\sphinx\\builders\\_epub_base.py", + "ast_data": "FunctionDef name:build_toc arg:self arguments arg Call Call If Compare Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "collective_leader", + "source_code": "def collective_leader(cluster_spec, task_type, task_id):\n cluster_spec = normalize_cluster_spec(cluster_spec)\n if not cluster_spec.as_dict():\n return ''\n _validate_cluster_spec(cluster_spec, task_type, task_id)\n if task_type == 'evaluator':\n return ''\n if 'chief' in cluster_spec.jobs:\n return '/job:chief/replica:0/task:0'\n assert 'worker' in cluster_spec.jobs\n return '/job:worker/replica:0/task:0'", + "docstring": "Return the job name for the leader of for collective ops. Args: cluster_spec: a dict, or object specifying the cluster configurations. task_type: the task type in the cluster. task_id: the task id in the cluster. Returns: a string indicating the leader job name or empty string if no need to set leader job.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py", + "ast_data": "FunctionDef name:collective_leader arg:cluster_spec arg:task_type arg:task_id arguments arg arg arg Assign Call If Call Return return:yes Call If Compare Return return:yes If Compare Return return:yes Compare Return return:yes" + }, + { + "library": "scipy", + "name": "deg_simplex", + "source_code": "def deg_simplex(self, S, proj=None):\n if proj is None:\n proj = S[1:] - S[0]\n if np.linalg.det(proj) == 0.0:\n return True\n else:\n return False", + "docstring": "Test a simplex S for degeneracy (linear dependence in R^dim). Parameters ---------- S : np.array Simplex with rows as vertex vectors proj : array, optional, If the projection S[1:] - S[0] is already computed it can be added as an optional argument.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_complex.py", + "ast_data": "FunctionDef name:deg_simplex arg:self arg:S arg:proj arguments arg arg arg If Compare Assign If Compare Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "load_spmv_dataset", + "source_code": "def load_spmv_dataset(dataset_path, hidden_size, sparsity, device, n_limit=math.inf):\n current_folder_path = f'{dataset_path}/{sparsity}'\n path = Path(current_folder_path)\n files = path.glob('**/*.smtx')\n print(dataset_path, hidden_size, sparsity)\n index = 0\n x_files, y_files = ([], [])\n for f in files:\n if index >= n_limit:\n break\n print('.', end='')\n size, nnz = read_matrix_params(f.as_posix())\n if size[1] == hidden_size:\n x_files.append(f.as_posix())\n if size[0] == hidden_size:\n y_files.append(f.as_posix())\n index += 1\n print()\n for fx, fy in zip(x_files, y_files):\n x = load_sparse_matrix(fx, device)\n y = gen_vector(fy, device)\n yield (x, y)", + "docstring": "load_spmv_dataset loads a DLMC dataset for a sparse matrix-vector multiplication (SPMV) performance test. Args: dataset_path: path of the dataset from DLMC collection. hidden_size This value allows tensors of varying sizes. sparsity: This value allows tensors of varying sparsities. device: Whether to place the Tensor on a GPU or CPU. 
n_limit: This value allows a dataset with some limit size.", + "type": "function", + "file_path": "pytorch\\benchmarks\\sparse\\dlmc\\utils.py", + "ast_data": "FunctionDef name:load_spmv_dataset arg:dataset_path arg:hidden_size arg:sparsity arg:device arg:n_limit arguments arg arg arg arg arg Assign Assign Call Assign Call Call Assign Assign For If Compare Call Assign Call Call If Compare Call Call If Compare Call Call Call For Call Assign Call Assign Call" + }, + { + "library": "scipy", + "name": "_read_string_data", + "source_code": "def _read_string_data(f):\n length = _read_long(f)\n if length > 0:\n length = _read_long(f)\n string_data = _read_bytes(f, length)\n _align_32(f)\n else:\n string_data = ''\n return string_data", + "docstring": "Read a data string (length is specified twice)", + "type": "function", + "file_path": "scipy\\scipy\\io\\_idl.py", + "ast_data": "FunctionDef name:_read_string_data arg:f arguments arg Assign Call If Compare Assign Call Assign Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_irfft_grad_helper", + "source_code": "def _irfft_grad_helper(rank, rfft_fn):\n assert rank in (1, 2), 'Gradient for IRFFT3D is not implemented.'\n\n def _grad(op, grad):\n fft_length = op.inputs[1]\n fft_length_static = _tensor_util.constant_value(fft_length)\n if fft_length_static is not None:\n fft_length = fft_length_static\n real_dtype = grad.dtype\n if real_dtype == _dtypes.float32:\n complex_dtype = _dtypes.complex64\n elif real_dtype == _dtypes.float64:\n complex_dtype = _dtypes.complex128\n is_odd = _math_ops.mod(fft_length[-1], 2)\n input_last_dimension = _array_ops.shape(op.inputs[0])[-1]\n mask = _array_ops.concat([[1.0], 2.0 * _array_ops.ones([input_last_dimension - 2 + is_odd], real_dtype), _array_ops.ones([1 - is_odd], real_dtype)], 0)\n rsize = _math_ops.reciprocal(_math_ops.cast(_fft_size_for_grad(grad, rank), real_dtype))\n the_rfft = rfft_fn(grad, fft_length)\n return (the_rfft * _math_ops.cast(rsize * mask, complex_dtype), None)\n return _grad", + "docstring": "Returns a gradient function for an IRFFT of the provided rank.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py", + "ast_data": "FunctionDef name:_irfft_grad_helper arg:rank arg:rfft_fn arguments arg arg Compare FunctionDef name:_grad arg:op arg:grad arguments arg arg Assign Assign Call If Compare Assign Assign If Compare Assign If Compare Assign Assign Call Assign Call Assign Call Call Call Assign Call Call Call Assign Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "register_user_hooks", + "source_code": "def register_user_hooks(self, pre_fw_hook: Optional[Callable]=None, post_fw_hook: Optional[Callable]=None, pre_bw_hook: Optional[Callable]=None, post_bw_hook: Optional[Callable]=None):\n\n def set_hook(hook, user_hook, hook_name):\n if hook is not None and user_hook is not None:\n raise AssertionError(f'Only one {hook_name} can be registered at a time Clear the existing hook by calling ``clear_user_hooks`` before registering a new one')\n return hook\n self._user_pre_fw_hook = set_hook(pre_fw_hook, self._user_pre_fw_hook, 'pre_fw_hook')\n self._user_post_fw_hook = set_hook(post_fw_hook, self._user_post_fw_hook, 'post_fw_hook')\n self._user_pre_bw_hook = set_hook(pre_bw_hook, self._user_pre_bw_hook, 'pre_bw_hook')\n self._user_post_bw_hook = set_hook(post_bw_hook, self._user_post_bw_hook, 'post_bw_hook')", + "docstring": "Registers user-specified hooks to be called before/after 
the forward/backward pass for each module tracked by the `` attribute when each of the hooks is called. Hooks are intended to be used as markers only not to modify the inputs/outputs.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_tools\\mod_tracker.py", + "ast_data": "FunctionDef name:register_user_hooks arg:self arg:pre_fw_hook arg:post_fw_hook arg:pre_bw_hook arg:post_bw_hook arguments arg arg arg arg arg FunctionDef name:set_hook arg:hook arg:user_hook arg:hook_name arguments arg arg arg If BoolOp Compare Compare Raise Call Return return:yes Assign Call Assign Call Assign Call Assign Call" + }, + { + "library": "scipy", + "name": "_getrow", + "source_code": "def _getrow(self, i):\n M, N = self.shape\n i = int(i)\n if i < 0:\n i += M\n if i < 0 or i >= M:\n raise IndexError(f'index ({i}) out of range')\n return self._get_submatrix(minor=i).tocsr()", + "docstring": "Returns a copy of row i of the matrix, as a (1 x n) CSR matrix (row vector).", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_csc.py", + "ast_data": "FunctionDef name:_getrow arg:self arg:i arguments arg arg Assign Assign Call If Compare If BoolOp Compare Compare Raise Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_replica_context", + "source_code": "@tf_export('distribute.get_replica_context')\ndef get_replica_context():\n return _get_per_thread_mode().replica_context", + "docstring": "Returns the current or . Returns if in a cross-replica context. Note that execution: 1. starts in the default (single-replica) replica context (this function will return the default object); 2. switches to cross-replica context (in which case this will return ) when entering a block; 3. switches to a (non-default) replica context inside ; 4. if calls , then inside you are back in the cross-replica context (and again this function will return ). Most methods may only be executed in a cross-replica context, in a replica context you should use the API of the object returned by this method instead. Returns: The current object when in a replica context scope, else . 
Within a particular block, exactly one of these two things will be true: * returns non-, or * returns True.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:get_replica_context arguments Return return:yes Call Call" + }, + { + "library": "django", + "name": "ManyToManyRel", + "source_code": "class ManyToManyRel(ForeignObjectRel):\n\n def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None, symmetrical=True, through=None, through_fields=None, db_constraint=True):\n super().__init__(field, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to)\n if through and (not db_constraint):\n raise ValueError(\"Can't supply a through model and db_constraint=False\")\n self.through = through\n if through_fields and (not through):\n raise ValueError('Cannot specify through_fields without a through model')\n self.through_fields = through_fields\n self.symmetrical = symmetrical\n self.db_constraint = db_constraint\n\n @property\n def identity(self):\n return (*super().identity, self.through, make_hashable(self.through_fields), self.db_constraint)\n\n def get_related_field(self):\n opts = self.through._meta\n if self.through_fields:\n field = opts.get_field(self.through_fields[0])\n else:\n for field in opts.fields:\n rel = getattr(field, 'remote_field', None)\n if rel and rel.model == self.model:\n break\n return field.foreign_related_fields[0]", + "docstring": "Used by ManyToManyField to store information about the relation. `` returns this class to provide access to the field flags for the reverse relation.", + "type": "class", + "file_path": "django\\django\\db\\models\\fields\\reverse_related.py", + "ast_data": "ClassDef name:ManyToManyRel FunctionDef name:__init__ arg:self arg:field arg:to arg:related_name arg:related_query_name arg:limit_choices_to arg:symmetrical arg:through arg:through_fields arg:db_constraint arguments arg arg arg arg arg arg arg arg arg arg Call Call If BoolOp Raise Call Assign If BoolOp Raise Call Assign Assign Assign FunctionDef name:identity arg:self arguments arg Return return:yes Call Call FunctionDef name:get_related_field arg:self arguments arg Assign If Assign Call For Assign Call If BoolOp Compare Return return:yes" + }, + { + "library": "scipy", + "name": "toarray", + "source_code": "def toarray(self, order=None, out=None):\n return self.tocoo(copy=False).toarray(order=order, out=out)", + "docstring": "Return a dense ndarray representation of this sparse array/matrix. Parameters ---------- order : {'C', 'F'}, optional Whether to store multidimensional data in C (row-major) or Fortran (column-major) order in memory. The default is 'None', which provides no ordering guarantees. Cannot be specified in conjunction with the argument. out : ndarray, 2-D, optional If specified, uses this array as the output buffer instead of allocating a new array to return. The provided array must have the same shape and dtype as the sparse array/matrix on which you are calling the method. For most sparse types, is required to be memory contiguous (either C or Fortran ordered). Returns ------- arr : ndarray, 2-D An array with the same shape and containing the same data represented by the sparse array/matrix, with the requested memory order. 
If `out` was passed, the same object is returned after being modified in-place to contain the appropriate values.", "type": "method", "file_path": "scipy\\scipy\\sparse\\_base.py", "ast_data": "FunctionDef name:toarray arg:self arg:order arg:out arguments arg arg arg Return return:yes Call Call" }, { "library": "pygame", "name": "add_internal", "source_code": "def add_internal(self, group):\n self.__g.add(group)", "docstring": "For adding this sprite to a group internally. :param group: The group we are adding to.", "type": "method", "file_path": "pygame\\src_py\\sprite.py", "ast_data": "FunctionDef name:add_internal arg:self arg:group arguments arg arg Call" }, { "library": "pytorch", "name": "get_worker_info", "source_code": "@_require_initialized\ndef get_worker_info(worker_name=None):\n if worker_name is not None:\n return _get_current_rpc_agent().get_worker_info(worker_name)\n else:\n return _get_current_rpc_agent().get_worker_info()", "docstring": "Get :class:`~torch.distributed.rpc.WorkerInfo` of a given worker name. Use this :class:`~torch.distributed.rpc.WorkerInfo` to avoid passing an expensive string on every invocation. Args: worker_name (str): the string name of a worker. If ``None``, the info of the current worker is returned.", "type": "function", "file_path": "pytorch\\torch\\distributed\\rpc\\api.py", "ast_data": "FunctionDef name:get_worker_info arg:worker_name arguments arg If Compare Return return:yes Call Call Return return:yes Call Call" }, { "library": "pandas", "name": "__contains__", "source_code": "@final\ndef __contains__(self, key) -> bool:\n return key in self._info_axis", "docstring": "True if the key is in the info axis", "type": "method", "file_path": "pandas\\pandas\\core\\generic.py", "ast_data": "FunctionDef name:__contains__ arg:self arg:key arguments arg arg Return return:yes Compare" }, { "library": "pytorch", "name": "GradInplaceRequiresGradCtxManagerVariable", "source_code": "class GradInplaceRequiresGradCtxManagerVariable(ContextWrappingVariable):\n\n @staticmethod\n def create(tx: 'InstructionTranslator', target_values, **kwargs):\n return GradInplaceRequiresGradCtxManagerVariable(target_values=target_values, initial_values=None, **kwargs)\n\n def enter(self, tx):\n [enabled] = self.target_values\n self.prev_state = torch._C._functorch.get_inplace_requires_grad_allowed()\n torch._C._functorch.set_inplace_requires_grad_allowed(enabled)\n self.set_cleanup_hook(tx, lambda: torch._C._functorch.set_inplace_requires_grad_allowed(self.prev_state))\n self.proxy = tx.output.create_node('call_function', torch._C._functorch.set_inplace_requires_grad_allowed, (enabled,), {})\n return variables.ConstantVariable.create(None)\n\n def exit(self, tx: 'InstructionTranslator', *args):\n self.cleanup()\n tx.output.create_node('call_function', torch._C._functorch.set_inplace_requires_grad_allowed, (self.prev_state,), {})\n return variables.ConstantVariable.create(None)", "docstring": "represents torch grad requires grad", "type": "class", "file_path": "pytorch\\torch\\_dynamo\\variables\\ctx_manager.py", "ast_data": "ClassDef name:GradInplaceRequiresGradCtxManagerVariable FunctionDef name:create arg:tx arg:target_values arguments arg arg arg Return return:yes Call FunctionDef name:enter arg:self arg:tx arguments arg arg Assign Assign Call Call Call arguments Call Assign Call Return return:yes Call FunctionDef name:exit arg:self arg:tx arguments arg arg arg Call Call Return return:yes Call" }, { "library": "pytorch", "name": 
"initialize_rng_states", + "source_code": "def initialize_rng_states(num_rng: int, graphsafe_idx: int, fwd_rng_states: list[torch.Generator], bwd_rng_states: list[torch.Generator]):\n with torch.utils._python_dispatch._disable_current_modes():\n seeds = torch.randint(0, torch.iinfo(torch.int64).max, (num_rng,), device='cpu')\n fwd_rng_states.extend([torch.cuda.default_generators[graphsafe_idx].clone_state().manual_seed(int(seeds[i])) for i in range(num_rng)])\n bwd_rng_states.extend([torch.cuda.default_generators[graphsafe_idx].clone_state().manual_seed(int(seeds[i])) for i in range(num_rng)])", + "docstring": "Initialize the cudagraph safe rng states. Initialization of rng states should have a few properties: - the initialization for each rng state should be independent - the initialization should be deterministic - the initialization should be based off current rng state, so that independent graphs do not have equal rng behavior We defer initialization of rng states until runtime because compilation is wrapped with preserve_rng_states. Seed initialization should advance the rng states so consecutive compilations do not give equal randomness.", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\runtime_wrappers.py", + "ast_data": "FunctionDef name:initialize_rng_states arg:num_rng arg:graphsafe_idx arg:fwd_rng_states arg:bwd_rng_states arguments arg arg arg arg With Call Assign Call Call Call Call Call Call Call Call Call Call Call Call" + }, + { + "library": "scipy", + "name": "compute_b_inv", + "source_code": "def compute_b_inv(A):\n\n def find_b_inv_elem(i, j, U, D, B):\n rng = min(3, n - i - 1)\n rng_sum = 0.0\n if j == 0:\n for k in range(1, rng + 1):\n rng_sum -= U[-k - 1, i + k] * B[-k - 1, i + k]\n rng_sum += D[i]\n B[-1, i] = rng_sum\n else:\n for k in range(1, rng + 1):\n diag = abs(k - j)\n ind = i + min(k, j)\n rng_sum -= U[-k - 1, i + k] * B[-diag - 1, ind + diag]\n B[-j - 1, i + j] = rng_sum\n U = cholesky_banded(A)\n for i in range(2, 5):\n U[-i, i - 1:] /= U[-1, :-i + 1]\n D = 1.0 / U[-1] ** 2\n U[-1] /= U[-1]\n n = U.shape[1]\n B = np.zeros(shape=(4, n))\n for i in range(n - 1, -1, -1):\n for j in range(min(3, n - i - 1), -1, -1):\n find_b_inv_elem(i, j, U, D, B)\n B[0] = [0.0] * n\n return B", + "docstring": "Inverse 3 central bands of matrix :math: assuming that `10.1007/BF01389878`", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_bsplines.py", + "ast_data": "FunctionDef name:compute_b_inv arg:A arguments arg FunctionDef name:find_b_inv_elem arg:i arg:j arg:U arg:D arg:B arguments arg arg arg arg arg Assign Call Assign If Compare For Call Assign For Call Assign Call Assign Call Assign Assign Call For Call Assign Assign Assign Call For Call For Call Call Call Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "@available_if(_check_novelty_predict)\ndef predict(self, X=None):\n return self._predict(X)", + "docstring": "Predict the labels (1 inlier, -1 outlier) of X according to LOF. **Only available for novelty detection (when novelty is set to True).** This method allows to generalize prediction to *new observations* (not in the training set). Note that the result of ``. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The query sample or samples to compute the Local Outlier Factor w.r.t. the training samples. 
Returns ------- is_inlier : ndarray of shape (n_samples,) Returns -1 for anomalies/outliers and +1 for inliers.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neighbors\\_lof.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "DistributedDatasetsFromFunctionSpec", + "source_code": "class DistributedDatasetsFromFunctionSpec(DistributedDatasetAndIteratorSpec):\n\n @property\n def value_type(self):\n return DistributedDatasetsFromFunction\n\n @property\n def _component_specs(self):\n specs = []\n worker_device_pairs = self._input_workers._worker_device_pairs\n for i, _ in enumerate(worker_device_pairs):\n element_spec = nest.map_structure(functools.partial(_replace_per_replica_spec, i=i), self._element_spec)\n specs.append(dataset_ops.DatasetSpec(element_spec))\n return specs\n\n def _to_components(self, value):\n return value._datasets\n\n def _from_components(self, components):\n return DistributedDatasetsFromFunction(input_workers=self._input_workers, strategy=self._strategy, components=components, element_spec=self._element_spec, options=self._options)\n\n @staticmethod\n def from_value(value):\n return DistributedDatasetsFromFunctionSpec(input_workers=value._input_workers, element_spec=value._element_spec, strategy=value._strategy, options=value._options)", + "docstring": "Type specification for `DistributedDatasetsFromFunction.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", + "ast_data": "ClassDef name:DistributedDatasetsFromFunctionSpec FunctionDef name:value_type arg:self arguments arg Return return:yes FunctionDef name:_component_specs arg:self arguments arg Assign Assign For Call Assign Call Call Call Call Return return:yes FunctionDef name:_to_components arg:self arg:value arguments arg arg Return return:yes FunctionDef name:_from_components arg:self arg:components arguments arg arg Return return:yes Call FunctionDef name:from_value arg:value arguments arg Return return:yes Call" + }, + { + "library": "authlib", + "name": "validate_software_id", + "source_code": "def validate_software_id(self):\n pass", + "docstring": "A unique identifier string (e.g., a Universally Unique Identifier (UUID)) assigned by the client developer or software publisher used by registration endpoints to identify the client software to be dynamically registered. Unlike \"client_id\", which is issued by the authorization server and SHOULD vary between instances, the \"software_id\" SHOULD remain the same for all instances of the client software. The \"software_id\" SHOULD remain the same across multiple updates or versions of the same piece of software. 
The value of this field is not intended to be human readable and is usually opaque to the client and authorization server.", "type": "method", "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py", "ast_data": "FunctionDef name:validate_software_id arg:self arguments arg" }, { "library": "pytorch", "name": "try_except_torch_function_mode", "source_code": "def try_except_torch_function_mode(self, code_options, cleanup: list[Instruction]):\n from .variables.torch_function import get_prev_stack_var_name\n setup_try_except, epilogue = _bytecode_from_template_with_split(_try_except_tf_mode_template, self.stack_index, varname_map={'stack_var_name': get_prev_stack_var_name()})\n cleanup[:] = epilogue + cleanup\n return setup_try_except", "docstring": "Codegen based off of: try: (rest) except: (restore previous tf mode stack) raise", "type": "method", "file_path": "pytorch\\torch\\_dynamo\\resume_execution.py", "ast_data": "FunctionDef name:try_except_torch_function_mode arg:self arg:code_options arg:cleanup arguments arg arg arg Assign Call Call Assign Return return:yes" }, { "library": "scipy", "name": "validate_max_step", "source_code": "def validate_max_step(max_step):\n if max_step <= 0:\n raise ValueError('`max_step` must be positive.')\n return max_step", "docstring": "Assert that max_step is valid and return it.", "type": "function", "file_path": "scipy\\scipy\\integrate\\_ivp\\common.py", "ast_data": "FunctionDef name:validate_max_step arg:max_step arguments arg If Compare Raise Call Return return:yes" }, { "library": "pandas", "name": "_validate_indexer", "source_code": "@final\ndef _validate_indexer(self, form: Literal['positional', 'slice'], key, kind: Literal['getitem', 'iloc']) -> None:\n if not lib.is_int_or_none(key):\n self._raise_invalid_indexer(form, key)", "docstring": "If we are a positional indexer, validate that we have an appropriately typed key; it must be an integer (or None).", "type": "method", "file_path": "pandas\\pandas\\core\\indexes\\base.py", "ast_data": "FunctionDef name:_validate_indexer arg:self arg:form arg:key arg:kind arguments arg arg arg arg If Call Call" }, { "library": "kornia", "name": "affine3d", "source_code": "def affine3d(tensor: Tensor, matrix: Tensor, mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=False) -> Tensor:\n is_unbatched: bool = tensor.ndimension() == 4\n if is_unbatched:\n tensor = torch.unsqueeze(tensor, dim=0)\n matrix = matrix.expand(tensor.shape[0], -1, -1)\n depth: int = tensor.shape[-3]\n height: int = tensor.shape[-2]\n width: int = tensor.shape[-1]\n warped: Tensor = warp_affine3d(tensor, matrix, (depth, height, width), mode, padding_mode, align_corners)\n if is_unbatched:\n warped = torch.squeeze(warped, dim=0)\n return warped", "docstring": "Apply an affine transformation to the 3d volume. Args: tensor: The image tensor to be warped in shapes of :math:, :math: and :math:. matrix: The affine transformation matrix with shape :math:. mode: interpolation mode to calculate output values ``. align_corners: interpolation flag. Returns: The warped image. 
Example: >>> img = torch.rand(1, 2, 4, 3, 5) >>> aff = torch.eye(3, 4)[None] >>> out = affine3d(img, aff) >>> print(out.shape) torch.Size([1, 2, 4, 3, 5])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py", + "ast_data": "FunctionDef name:affine3d arg:tensor arg:matrix arg:mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg Compare Call If Assign Call Assign Call Call If Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "op", + "source_code": "@property\ndef op(self) -> ops.Operation:\n return self.values.op", + "docstring": "The that produces as an output.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py", + "ast_data": "FunctionDef name:op arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "__get__", + "source_code": "def __get__(self, instance, cls=None):\n if instance is None:\n return self\n pattern = instance._regex\n if isinstance(pattern, str):\n instance.__dict__['regex'] = self._compile(pattern)\n return instance.__dict__['regex']\n language_code = get_language()\n if language_code not in instance._regex_dict:\n instance._regex_dict[language_code] = self._compile(str(pattern))\n return instance._regex_dict[language_code]", + "docstring": "Return a compiled regular expression based on the active language.", + "type": "method", + "file_path": "django\\django\\urls\\resolvers.py", + "ast_data": "FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg If Compare Return return:yes Assign If Call Assign Call Return return:yes Assign Call If Compare Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "update_state", + "source_code": "def update_state(self, y_true, y_pred, sample_weight=None):\n return metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight)", + "docstring": "Accumulates true positive and false negative statistics. Args: y_true: The ground truth values, with the same dimensions as . Will be cast to . y_pred: The predicted values. Each element must be in the range . sample_weight: Optional weighting of each example. Defaults to 1. Can be a whose rank is either 0, or the same rank as , and must be broadcastable to . 
Returns: Update op.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "FunctionDef name:update_state arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "GetWhileContext", + "source_code": "def GetWhileContext(op):\n ctxt = op._get_control_flow_context()\n if ctxt:\n ctxt = ctxt.GetWhileContext()\n return ctxt", + "docstring": "Get the WhileContext to which this op belongs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py", + "ast_data": "FunctionDef name:GetWhileContext arg:op arguments arg Assign Call If Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_label_position", + "source_code": "def set_label_position(self, position):\n self.label.set_rotation_mode('anchor')\n self.label.set_verticalalignment(_api.check_getitem({'left': 'bottom', 'right': 'top'}, position=position))\n self.label_position = position\n self.stale = True", + "docstring": "Set the label position (left or right) Parameters ---------- position : {'left', 'right'}", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:set_label_position arg:self arg:position arguments arg arg Call Call Call Assign Assign" + }, + { + "library": "scipy", + "name": "derivative", + "source_code": "def derivative(self, nu=1):\n c = self.c.copy()\n ct = len(self.t) - len(c)\n if ct > 0:\n c = np.r_[c, np.zeros((ct,) + c.shape[1:])]\n tck = _fitpack_impl.splder((self.t, c, self.k), nu)\n return self.construct_fast(*tck, extrapolate=self.extrapolate, axis=self.axis)", + "docstring": "Return a B-spline representing the derivative. Parameters ---------- nu : int, optional Derivative order. Default is 1. Returns ------- b : object A new instance representing the derivative. 
See Also -------- splder, splantider", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_bsplines.py", + "ast_data": "FunctionDef name:derivative arg:self arg:nu arguments arg arg Assign Call Assign Call Call If Compare Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "ModuleWrapPolicy", + "source_code": "class ModuleWrapPolicy(_Policy):\n\n def __init__(self, module_classes: Iterable[type[nn.Module]]):\n module_classes_set = set(module_classes)\n self._module_classes = module_classes_set\n self._module_classes_str = str(module_classes_set)\n\n def _run_policy(self, root_module: nn.Module, ignored_modules: set[nn.Module], root_kwargs: dict[str, Any]) -> dict[nn.Module, dict[str, Any]]:\n module_classes = tuple(self._module_classes)\n target_module_to_kwargs: dict[nn.Module, dict[str, Any]] = {}\n for module in root_module.modules():\n if module in ignored_modules:\n continue\n elif isinstance(module, module_classes):\n target_module_to_kwargs[module] = copy.copy(root_kwargs)\n return target_module_to_kwargs\n\n def __call__(self, module, recurse, *args, **kwargs):\n return _module_wrap_policy(module, recurse, nonwrapped_numel=-1, module_classes=self._module_classes)\n\n def __repr__(self) -> str:\n return super().__repr__() + f'({self._module_classes_str})'", + "docstring": "This policy applies to every module of the specified module classes, passing in the kwargs given to the root.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py", + "ast_data": "ClassDef name:ModuleWrapPolicy FunctionDef name:__init__ arg:self arg:module_classes arguments arg arg Assign Call Assign Assign Call FunctionDef name:_run_policy arg:self arg:root_module arg:ignored_modules arg:root_kwargs arguments arg arg arg arg Assign Call For Call If Compare If Call Assign Call Return return:yes FunctionDef name:__call__ arg:self arg:module arg:recurse arguments arg arg arg arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "isbuiltin", + "source_code": "def isbuiltin(object):\n return _inspect.isbuiltin(tf_decorator.unwrap(object)[1])", + "docstring": "TFDecorator-aware replacement for inspect.isbuiltin.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", + "ast_data": "FunctionDef name:isbuiltin arg:object arguments arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "pipe", + "source_code": "def pipe(obj: _T, func: Callable[Concatenate[_T, P], T] | tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T:\n if isinstance(func, tuple):\n func_, target = func\n if target in kwargs:\n msg = f'{target} is both the pipe target and a keyword argument'\n raise ValueError(msg)\n kwargs[target] = obj\n return func_(*args, **kwargs)\n else:\n return func(obj, *args, **kwargs)", + "docstring": "Apply a function ``.", + "type": "function", + "file_path": "pandas\\pandas\\core\\common.py", + "ast_data": "FunctionDef name:pipe arg:obj arg:func arguments arg arg arg arg If Call Assign If Compare Assign Raise Call Assign Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_input_shapes", + "source_code": "def _get_input_shapes(self, tensors, per_replica: bool, in_tpu_context: bool) -> List[TensorShape]:\n input_shapes = []\n for (path, maybe_tensor), feature in zip(nest.flatten_with_joined_string_paths(tensors), 
nest.flatten(self._feature_config)):\n if not in_tpu_context:\n tensor = distribute_utils.select_replica(0, maybe_tensor)\n else:\n tensor = maybe_tensor\n if isinstance(tensor, tensor_lib.Tensor):\n input_shapes.append(self._get_input_shape_for_tensor(tensor, feature, per_replica, path))\n elif isinstance(tensor, sparse_tensor.SparseTensor):\n input_shapes.append(self._get_input_shape_for_sparse_tensor(tensor, feature, per_replica, path))\n elif isinstance(tensor, ragged_tensor.RaggedTensor):\n input_shapes.append(self._get_input_shape_for_ragged_tensor(tensor, feature, per_replica, path))\n return input_shapes", + "docstring": "Get the input shapes from the input tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py", + "ast_data": "FunctionDef name:_get_input_shapes arg:self arg:tensors arg:per_replica arg:in_tpu_context arguments arg arg arg arg Assign For Call Call Call If Assign Call Assign If Call Call Call If Call Call Call If Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_get_estimator", + "source_code": "@abstractmethod\ndef _get_estimator(self):\n pass", + "docstring": "Model to be fitted after the best alpha has been determined.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_coordinate_descent.py", + "ast_data": "FunctionDef name:_get_estimator arg:self arguments arg" + }, + { + "library": "django", + "name": "check_constraints", + "source_code": "def check_constraints(self, table_names=None):\n with self.cursor() as cursor:\n cursor.execute('SET CONSTRAINTS ALL IMMEDIATE')\n cursor.execute('SET CONSTRAINTS ALL DEFERRED')", + "docstring": "Check constraints by setting them to immediate. Return them to deferred afterward.", + "type": "method", + "file_path": "django\\django\\db\\backends\\oracle\\base.py", + "ast_data": "FunctionDef name:check_constraints arg:self arg:table_names arguments arg arg With Call Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, ui_type='readline', dump_root=None, thread_name_filter=None, config_file_path=None):\n self._ui_type = ui_type\n self._dump_root = dump_root\n self._thread_name_filter = thread_name_filter\n self._session_wrapper = None\n self._pending_tensor_filters = {}\n self._config_file_path = config_file_path", + "docstring": "Create a local debugger command-line interface (CLI) hook. Args: ui_type: () requested user-interface type. Currently supported: (readline). dump_root: () optional path to the dump root directory. Must be a directory that does not exist or an empty directory. If the directory does not exist, it will be created by the debugger core during debug calls and removed afterwards. thread_name_filter: Regular-expression white list for threads on which the wrapper session will be active. See doc of for more details. 
config_file_path: Optional override to the default configuration file path, which is at .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\hooks.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:ui_type arg:dump_root arg:thread_name_filter arg:config_file_path arguments arg arg arg arg arg Assign Assign Assign Assign Assign Assign" + }, + { + "library": "django", + "name": "boundary", + "source_code": "@property\ndef boundary(self):\n return self._geomgen(capi.get_boundary)", + "docstring": "Return the boundary of this geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:boundary arg:self arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "RelativeRiskResult", + "source_code": "@dataclass\nclass RelativeRiskResult:\n relative_risk: float\n exposed_cases: int\n exposed_total: int\n control_cases: int\n control_total: int\n\n def confidence_interval(self, confidence_level=0.95):\n if not 0 <= confidence_level <= 1:\n raise ValueError('confidence_level must be in the interval [0, 1].')\n if self.exposed_cases == 0 and self.control_cases == 0:\n return ConfidenceInterval(low=np.nan, high=np.nan)\n elif self.exposed_cases == 0:\n return ConfidenceInterval(low=0.0, high=np.nan)\n elif self.control_cases == 0:\n return ConfidenceInterval(low=np.nan, high=np.inf)\n alpha = 1 - confidence_level\n z = ndtri(1 - alpha / 2)\n rr = self.relative_risk\n se = np.sqrt(1 / self.exposed_cases - 1 / self.exposed_total + 1 / self.control_cases - 1 / self.control_total)\n delta = z * se\n katz_lo = rr * np.exp(-delta)\n katz_hi = rr * np.exp(delta)\n return ConfidenceInterval(low=katz_lo, high=katz_hi)", + "docstring": "Result of . Attributes ---------- relative_risk : float This is:: (exposed_cases/exposed_total) / (control_cases/control_total) exposed_cases : int The number of \"cases\" (i.e. occurrence of disease or other event of interest) among the sample of \"exposed\" individuals. exposed_total : int The total number of \"exposed\" individuals in the sample. control_cases : int The number of \"cases\" among the sample of \"control\" or non-exposed individuals. control_total : int The total number of \"control\" individuals in the sample. Methods ------- confidence_interval : Compute the confidence interval for the relative risk estimate.", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_relative_risk.py", + "ast_data": "ClassDef name:RelativeRiskResult FunctionDef name:confidence_interval arg:self arg:confidence_level arguments arg arg If Compare Raise Call If BoolOp Compare Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Assign Assign Call Assign Assign Call Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_exclude", + "source_code": "def get_exclude(self, request, obj=None):\n return self.exclude", + "docstring": "Hook for specifying exclude.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:get_exclude arg:self arg:request arg:obj arguments arg arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "restore", + "source_code": "def restore(self):\n self.read_checkpoint_manager.restore_or_initialize()", + "docstring": "Restore the training state from the backed up checkpoint file. Returns: True if the training state is successfully restored. 
False if the training state doesn't need to be restored, or error occurred so it can't.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\worker_training_state.py", + "ast_data": "FunctionDef name:restore arg:self arguments arg Call" + }, + { + "library": "virtualenv", + "name": "py_info_clear", + "source_code": "def py_info_clear(self):\n py_info_folder = self.py_info_at\n with py_info_folder:\n for filename in py_info_folder.path.iterdir():\n if filename.suffix == '.json':\n with py_info_folder.lock_for_key(filename.stem):\n if filename.exists():\n filename.unlink()", + "docstring": "clear py info.", + "type": "method", + "file_path": "virtualenv\\src\\virtualenv\\app_data\\via_disk_folder.py", + "ast_data": "FunctionDef name:py_info_clear arg:self arguments arg Assign With For Call If Compare With Call If Call Call" + }, + { + "library": "pytorch", + "name": "_register_orig_params", + "source_code": "def _register_orig_params(state: _FSDPState, module: nn.Module) -> None:\n handle = _module_handle(state, module)\n if not handle:\n return\n _deregister_flat_param(state, module)\n if handle.is_sharded(handle.flat_param):\n handle._use_sharded_views()\n handle._use_sharded_grad_views()\n else:\n handle._use_unsharded_views(as_params=True)", + "docstring": "Deregisters the ``; registers the original parameters.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_unshard_param_utils.py", + "ast_data": "FunctionDef name:_register_orig_params arg:state arg:module arguments arg arg Assign Call If Return return:no Call If Call Call Call Call" + }, + { + "library": "sphinx", + "name": "TexinfoWriter", + "source_code": "class TexinfoWriter(writers.Writer):\n supported = ('texinfo', 'texi')\n settings_spec = ('Texinfo Specific Options', None, (('Name of the Info file', ['--texinfo-filename'], {'default': ''}), ('Dir entry', ['--texinfo-dir-entry'], {'default': ''}), ('Description', ['--texinfo-dir-description'], {'default': ''}), ('Category', ['--texinfo-dir-category'], {'default': 'Miscellaneous'})))\n settings_defaults: ClassVar[dict[str, Any]] = {}\n output: str\n visitor_attributes = ('output', 'fragment')\n\n def __init__(self, builder: TexinfoBuilder) -> None:\n super().__init__()\n self.builder = builder\n\n def translate(self) -> None:\n assert isinstance(self.document, nodes.document)\n visitor = self.builder.create_translator(self.document, self.builder)\n self.visitor = cast('TexinfoTranslator', visitor)\n self.document.walkabout(visitor)\n self.visitor.finish()\n for attr in self.visitor_attributes:\n setattr(self, attr, getattr(self.visitor, attr))", + "docstring": "Texinfo writer for generating Texinfo documents.", + "type": "class", + "file_path": "sphinx\\sphinx\\writers\\texinfo.py", + "ast_data": "ClassDef name:TexinfoWriter Assign Assign Assign FunctionDef name:__init__ arg:self arg:builder arguments arg arg Call Call Assign FunctionDef name:translate arg:self arguments arg Call Assign Call Assign Call Call Call For Call Call" + }, + { + "library": "pytorch", + "name": "Singleton", + "source_code": "@dataclass\nclass Singleton(DimSpec):\n pass", + "docstring": "Output dimension is a singleton.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_view_ops.py", + "ast_data": "ClassDef name:Singleton" + }, + { + "library": "pytorch", + "name": "_skip_coverage", + "source_code": "@staticmethod\ndef _skip_coverage(path: str) -> bool:\n return 'third-party' in path", + "docstring": "Returns True 
if file path should not be processed. This is repo-specific and only makes sense for the current state of ovrsource.", + "type": "method", + "file_path": "pytorch\\tools\\code_coverage\\package\\tool\\parser\\gcov_coverage_parser.py", + "ast_data": "FunctionDef name:_skip_coverage arg:path arguments arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "add_to_tensor", + "source_code": "def add_to_tensor(self, mat, name='add_to_tensor'):\n with self._name_scope(name):\n multiplier_vector = array_ops.expand_dims(self.multiplier, -1)\n mat = tensor_conversion.convert_to_tensor_v2_with_dispatch(mat, name='mat')\n mat_diag = array_ops.matrix_diag_part(mat)\n new_diag = multiplier_vector + mat_diag\n return array_ops.matrix_set_diag(mat, new_diag)", + "docstring": "Add matrix represented by this operator to . Equiv to . Args: mat: with same and shape broadcastable to . name: A name to give this . Returns: A with broadcast shape and same as .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_identity.py", + "ast_data": "FunctionDef name:add_to_tensor arg:self arg:mat arg:name arguments arg arg arg With Call Assign Call Assign Call Assign Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, debug_ops=None, node_name_regex_allowlist=None, op_type_regex_allowlist=None, tensor_dtype_regex_allowlist=None, tolerate_debug_op_creation_failures=False):\n if debug_ops:\n self.debug_ops = debug_ops\n else:\n self.debug_ops = ['DebugIdentity']\n self.node_name_regex_allowlist = node_name_regex_allowlist\n self.op_type_regex_allowlist = op_type_regex_allowlist\n self.tensor_dtype_regex_allowlist = tensor_dtype_regex_allowlist\n self.tolerate_debug_op_creation_failures = tolerate_debug_op_creation_failures", + "docstring": "Constructor of WatchOptions: Debug watch options. Used as return values of s. Args: debug_ops: ( or ) Debug ops to be used. node_name_regex_allowlist: Regular-expression allowlist for node_name, e.g., op_type_regex_allowlist: Regular-expression allowlist for the op type of nodes, e.g., . If both and are set, the two filtering operations will occur in a logical relation. In other words, a node will be included if and only if it hits both allowlists. tensor_dtype_regex_allowlist: Regular-expression allowlist for Tensor data type, e.g., . This allowlist operates in logical relations to the two allowlists above. tolerate_debug_op_creation_failures: () whether debug op creation failures (e.g., due to dtype incompatibility) are to be tolerated by not throwing exceptions.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:debug_ops arg:node_name_regex_allowlist arg:op_type_regex_allowlist arg:tensor_dtype_regex_allowlist arg:tolerate_debug_op_creation_failures arguments arg arg arg arg arg arg If Assign Assign Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "_get_handle_indices", + "source_code": "def _get_handle_indices(self, handle: FlatParamHandle) -> tuple[Optional[int], ...]:\n indices: list[Optional[int]] = []\n if handle:\n indices.append(handle._handle_index)\n return tuple(indices)", + "docstring": "Returns the handle indices (i.e. 
indices into `` if the handle is invalid.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_exec_order_utils.py", + "ast_data": "FunctionDef name:_get_handle_indices arg:self arg:handle arguments arg arg If Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "load", + "source_code": "def load(self):\n data = self._load()\n if data is None or data[1] < self.now():\n if self.debug:\n cherrypy.log('Expired session %r, flushing data.' % self.id, 'TOOLS.SESSIONS')\n self._data = {}\n else:\n if self.debug:\n cherrypy.log('Data loaded for session %r.' % self.id, 'TOOLS.SESSIONS')\n self._data = data[0]\n self.loaded = True\n cls = self.__class__\n if self.clean_freq and (not cls.clean_thread):\n t = cherrypy.process.plugins.Monitor(cherrypy.engine, self.clean_up, self.clean_freq * 60, name='Session cleanup')\n t.subscribe()\n cls.clean_thread = t\n t.start()\n if self.debug:\n cherrypy.log('Started cleanup thread.', 'TOOLS.SESSIONS')", + "docstring": "Copy stored session data into this session instance.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:load arg:self arguments arg Assign Call If BoolOp Compare Compare Call If Call Assign If Call Assign Assign Assign If BoolOp Assign Call Call Assign Call If Call" + }, + { + "library": "kornia", + "name": "CFA", + "source_code": "class CFA(Enum):\n BG = 0\n GB = 1\n RG = 2\n GR = 3", + "docstring": "Define the configuration of the color filter array. So far only bayer images is supported and the enum sets the pixel order for bayer. Note that this can change due to things like rotations and cropping of images. Take care if including the translations in pipeline. This implementations is optimized to be reasonably fast, look better than simple nearest neighbour. On top of this care is taken to make it reversible going raw -> rgb -> raw. the raw samples remain intact during conversion and only unknown samples are interpolated. The names are based on the OpenCV convention where the BG indicates pixel 1,1 (counting from 0,0) is blue and its neighbour to the right is green. In that case the top left pixel is red. 
Other options are GB, RG and GR reference:", + "type": "class", + "file_path": "kornia\\kornia\\color\\raw.py", + "ast_data": "ClassDef name:CFA Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "unflatten_state_dict", + "source_code": "def unflatten_state_dict(state_dict: STATE_DICT_TYPE, mapping: FLATTEN_MAPPING) -> STATE_DICT_TYPE:\n nested: STATE_DICT_TYPE = {}\n for key, value in state_dict.items():\n set_element(nested, mapping[key], value)\n return nested", + "docstring": "Restore the original nested state_dict according to ``.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\_nested_dict.py", + "ast_data": "FunctionDef name:unflatten_state_dict arg:state_dict arg:mapping arguments arg arg For Call Call Return return:yes" + }, + { + "library": "django", + "name": "format_value", + "source_code": "def format_value(self, value):\n if value is True or value is False or value is None or (value == ''):\n return\n return str(value)", + "docstring": "Only return the 'value' attribute if value isn't empty.", + "type": "method", + "file_path": "django\\django\\forms\\widgets.py", + "ast_data": "FunctionDef name:format_value arg:self arg:value arguments arg arg If BoolOp Compare Compare Compare Compare Return return:no Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "httpserver_from_self", + "source_code": "def httpserver_from_self(self, httpserver=None):\n if httpserver is None:\n httpserver = self.instance\n if httpserver is None:\n from cherrypy import _cpwsgi_server\n httpserver = _cpwsgi_server.CPWSGIServer(self)\n if isinstance(httpserver, text_or_bytes):\n httpserver = attributes(httpserver)(self)\n return (httpserver, self.bind_addr)", + "docstring": "Return a (httpserver, bind_addr) pair based on self attributes.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpserver.py", + "ast_data": "FunctionDef name:httpserver_from_self arg:self arg:httpserver arguments arg arg If Compare Assign If Compare Assign Call If Call Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "find_stack_level", + "source_code": "def find_stack_level() -> int:\n import pandas as pd\n pkg_dir = os.path.dirname(pd.__file__)\n test_dir = os.path.join(pkg_dir, 'tests')\n frame: FrameType | None = inspect.currentframe()\n try:\n n = 0\n while frame:\n filename = inspect.getfile(frame)\n if filename.startswith(pkg_dir) and (not filename.startswith(test_dir)):\n frame = frame.f_back\n n += 1\n else:\n break\n finally:\n del frame\n return n", + "docstring": "Find the first place in the stack that is not inside pandas (tests notwithstanding).", + "type": "function", + "file_path": "pandas\\pandas\\util\\_exceptions.py", + "ast_data": "FunctionDef name:find_stack_level arguments Assign Call Assign Call Call Try Assign While Assign Call If BoolOp Call Call Assign Return return:yes" + }, + { + "library": "seaborn", + "name": "__call__", + "source_code": "def __call__(self, x1, x2=None, weights=None):\n x1 = np.asarray(x1)\n if weights is None:\n weights = np.ones_like(x1)\n else:\n weights = np.asarray(weights)\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)", + "docstring": "Return proportion or count of observations below each sorted datapoint.", + "type": "method", + "file_path": "seaborn\\seaborn\\_statistics.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x1 arg:x2 arg:weights arguments arg arg arg arg Assign Call If Compare 
Assign Call Assign Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_from_dataframe", + "source_code": "def _from_dataframe(df: DataFrameXchg, allow_copy: bool=True) -> pd.DataFrame:\n pandas_dfs = []\n for chunk in df.get_chunks():\n pandas_df = protocol_df_chunk_to_pandas(chunk)\n pandas_dfs.append(pandas_df)\n if not allow_copy and len(pandas_dfs) > 1:\n raise RuntimeError('To join chunks a copy is required which is forbidden by allow_copy=False')\n if not pandas_dfs:\n pandas_df = protocol_df_chunk_to_pandas(df)\n elif len(pandas_dfs) == 1:\n pandas_df = pandas_dfs[0]\n else:\n pandas_df = pd.concat(pandas_dfs, axis=0, ignore_index=True, copy=False)\n index_obj = df.metadata.get('pandas.index', None)\n if index_obj is not None:\n pandas_df.index = index_obj\n return pandas_df", + "docstring": "Build a `__dataframe__` method. allow_copy : bool, default: True Whether to allow copying the memory to perform the conversion (if false then zero-copy approach is requested). Returns ------- pd.DataFrame", + "type": "function", + "file_path": "pandas\\pandas\\core\\interchange\\from_dataframe.py", + "ast_data": "FunctionDef name:_from_dataframe arg:df arg:allow_copy arguments arg arg Assign For Call Assign Call Call If BoolOp Compare Call Raise Call If Assign Call If Compare Call Assign Assign Call Assign Call If Compare Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "status", + "source_code": "@property\ndef status(self):\n _, status = self.args[:2]\n return status", + "docstring": "The integer HTTP status code to emit.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cperror.py", + "ast_data": "FunctionDef name:status arg:self arguments arg Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "add_module", + "source_code": "def add_module(self, name: str, module: Optional['Module']) -> None:\n if not isinstance(module, Module) and module is not None:\n raise TypeError(f'{torch.typename(module)} is not a Module subclass')\n elif not isinstance(name, str):\n raise TypeError(f'module name should be a string. Got {torch.typename(name)}')\n elif hasattr(self, name) and name not in self._modules:\n raise KeyError(f\"attribute '{name}' already exists\")\n elif '.' in name:\n raise KeyError(f\"\"\"module name can't contain \".\", got: {name}\"\"\")\n elif name == '':\n raise KeyError('module name can\\'t be empty string \"\"')\n for hook in _global_module_registration_hooks.values():\n output = hook(self, name, module)\n if output is not None:\n module = output\n self._modules[name] = module", + "docstring": "Add a child module to the current module. The module can be accessed as an attribute using the given name. Args: name (str): name of the child module. 
The child module can be accessed from this module using the given name module (Module): child module to be added to the module.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:add_module arg:self arg:name arg:module arguments arg arg arg If BoolOp Call Compare Raise Call Call If Call Raise Call Call If BoolOp Call Compare Raise Call If Compare Raise Call If Compare Raise Call For Call Assign Call If Compare Assign Assign" + }, + { + "library": "tensorflow", + "name": "matvec", + "source_code": "def matvec(self, x, adjoint=False, name='matvec'):\n with self._name_scope(name):\n block_dimensions = self._block_range_dimensions() if adjoint else self._block_domain_dimensions()\n if linear_operator_util.arg_is_blockwise(block_dimensions, x, -1):\n for i, block in enumerate(x):\n if not isinstance(block, linear_operator.LinearOperator):\n block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)\n self._check_input_dtype(block)\n block_dimensions[i].assert_is_compatible_with(block.shape[-1])\n x[i] = block\n x_mat = [block[..., array_ops.newaxis] for block in x]\n y_mat = self.matmul(x_mat, adjoint=adjoint)\n return [array_ops.squeeze(y, axis=-1) for y in y_mat]\n x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')\n self._check_input_dtype(x)\n op_dimension = self.range_dimension if adjoint else self.domain_dimension\n op_dimension.assert_is_compatible_with(x.shape[-1])\n x_mat = x[..., array_ops.newaxis]\n y_mat = self.matmul(x_mat, adjoint=adjoint)\n return array_ops.squeeze(y_mat, axis=-1)", + "docstring": "Transform [batch] vector with left multiplication: . Args: x: with compatible shape and same as , or an iterable of s (for blockwise operators). s are treated a [batch] vectors, meaning for every set of leading dimensions, the last dimension defines a vector. See class docstring for definition of compatibility. adjoint: Python . If , left multiply by the adjoint: . name: A name for this . Returns: A with shape and same as .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_block_diag.py", + "ast_data": "FunctionDef name:matvec arg:self arg:x arg:adjoint arg:name arguments arg arg arg arg With Call Assign Call Call If Call For Call If Call Assign Call Call Call Assign Assign Assign Call Return return:yes Call Assign Call Call Assign Call Assign Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_flat_args_with_check", + "source_code": "def _get_flat_args_with_check(self, args, kwargs):\n in_spec = self.call_spec.in_spec\n if in_spec is not None:\n kwargs = reorder_kwargs(kwargs, in_spec)\n flat_args_with_path, received_spec = pytree.tree_flatten_with_path((args, kwargs))\n self._check_input_constraints(flat_args_with_path)\n flat_args = tuple((x[1] for x in flat_args_with_path))\n return (flat_args, received_spec)", + "docstring": "Flatten args, kwargs using pytree, then, check specs. 
Args: args: List[Any] original args passed to __call__ kwargs: Dict[str, Any] original kwargs passed to __call Returns: A tuple of (flat_args, received_spec) flat_args is flattend args / kwargs received_spec is the pytree spec produced while flattening the tuple (args, kwargs)", + "type": "method", + "file_path": "pytorch\\torch\\export\\exported_program.py", + "ast_data": "FunctionDef name:_get_flat_args_with_check arg:self arg:args arg:kwargs arguments arg arg arg Assign If Compare Assign Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_descendants_with_paths", + "source_code": "def _descendants_with_paths(self):\n bfs_sorted = []\n to_visit = collections.deque([self.root])\n node_paths = object_identity.ObjectIdentityDictionary()\n node_paths[self.root] = ()\n while to_visit:\n current_trackable = to_visit.popleft()\n bfs_sorted.append(current_trackable)\n for name, dependency in self.children(current_trackable).items():\n if dependency not in node_paths:\n node_paths[dependency] = node_paths[current_trackable] + (base.TrackableReference(name, dependency),)\n to_visit.append(dependency)\n return (bfs_sorted, node_paths)", + "docstring": "Returns a list of all nodes and its paths from self.root using a breadth first traversal.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\trackable_view.py", + "ast_data": "FunctionDef name:_descendants_with_paths arg:self arguments arg Assign Assign Call Assign Call Assign While Assign Call Call For Call Call If Compare Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "pip_download", + "source_code": "@timed('Downloading packages')\ndef pip_download(self, *packages: str, prerelease: bool=False, **popen_kwargs: Any) -> list[Path]:\n tmpdir = tempfile.TemporaryDirectory(prefix='pip-download-')\n atexit.register(tmpdir.cleanup)\n tempdir = Path(tmpdir.name).absolute()\n print(f'Downloading package(s) ({self.pip_source.index_url}): {', '.join(packages)}')\n if prerelease:\n args = ['--pre', *packages]\n else:\n args = list(packages)\n self.pip('download', '--dest', str(tempdir), *args, **popen_kwargs)\n files = list(tempdir.iterdir())\n print(f'Downloaded {len(files)} file(s) to {tempdir}:')\n for file in files:\n print(f' - {file.name}')\n return files", + "docstring": "Download a package in the virtual environment.", + "type": "method", + "file_path": "pytorch\\tools\\nightly.py", + "ast_data": "FunctionDef name:pip_download arg:self arguments arg arg arg arg Assign Call Call Assign Call Call Call Call If Assign Assign Call Call Call Assign Call Call Call Call For Call Return return:yes Call" + }, + { + "library": "django", + "name": "FilteredSelectMultiple", + "source_code": "class FilteredSelectMultiple(forms.SelectMultiple):\n\n class Media:\n js = ['admin/js/core.js', 'admin/js/SelectBox.js', 'admin/js/SelectFilter2.js']\n\n def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):\n self.verbose_name = verbose_name\n self.is_stacked = is_stacked\n super().__init__(attrs, choices)\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n context['widget']['attrs']['class'] = 'selectfilter'\n if self.is_stacked:\n context['widget']['attrs']['class'] += 'stacked'\n context['widget']['attrs']['data-field-name'] = self.verbose_name\n context['widget']['attrs']['data-is-stacked'] = int(self.is_stacked)\n return context", + "docstring": "A SelectMultiple with a JavaScript filter 
interface. Note that the resulting JavaScript assumes that the jsi18n catalog has been loaded in the page", + "type": "class", + "file_path": "django\\django\\contrib\\admin\\widgets.py", + "ast_data": "ClassDef name:FilteredSelectMultiple ClassDef name:Media Assign FunctionDef name:__init__ arg:self arg:verbose_name arg:is_stacked arg:attrs arg:choices arguments arg arg arg arg arg Assign Assign Call Call FunctionDef name:get_context arg:self arg:name arg:value arg:attrs arguments arg arg arg arg Assign Call Call Assign If Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "basis", + "source_code": "def basis(A):\n return torch.linalg.qr(A).Q", + "docstring": "Return orthogonal basis of A columns.", + "type": "function", + "file_path": "pytorch\\torch\\_linalg_utils.py", + "ast_data": "FunctionDef name:basis arg:A arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_text_props", + "source_code": "@_docstring.interpd\ndef set_text_props(self, **kwargs):\n self._text._internal_update(kwargs)\n self.stale = True", + "docstring": "Update the text properties. Valid keyword arguments are: %(Text:kwdoc)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\table.py", + "ast_data": "FunctionDef name:set_text_props arg:self arguments arg arg Call Assign" + }, + { + "library": "pytorch", + "name": "step", + "source_code": "def step(self) -> None:\n self.last_epoch += 1\n idx = bisect_right(self._milestones, self.last_epoch)\n scheduler = self._schedulers[idx]\n if idx > 0 and self._milestones[idx - 1] == self.last_epoch:\n scheduler.step(0)\n else:\n scheduler.step()\n self._last_lr = scheduler.get_last_lr()", + "docstring": "Perform a step.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "FunctionDef name:step arg:self arguments arg Assign Call Assign If BoolOp Compare Compare Call Call Assign Call" + }, + { + "library": "pytorch", + "name": "get_first_attr", + "source_code": "def get_first_attr(obj, *attrs):\n for attr in attrs:\n if hasattr(obj, attr):\n return getattr(obj, attr)\n raise AssertionError(f'{obj} does not has any of the attributes: {attrs}')", + "docstring": "Return the first available attribute or throw an exception if none is present.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:get_first_attr arg:obj arguments arg arg For If Call Return return:yes Call Raise Call" + }, + { + "library": "tensorflow", + "name": "get_tensor", + "source_code": "def get_tensor(self, tensor_index, subgraph_index=0):\n return self._interpreter.GetTensor(tensor_index, subgraph_index)", + "docstring": "Gets the value of the output tensor (get a copy). If you wish to avoid the copy, use . This function cannot be used to read intermediate results. Args: tensor_index: Tensor index of tensor to get. This value can be gotten from the 'index' field in get_output_details. subgraph_index: Index of the subgraph to fetch the tensor. Default value is 0, which means to fetch from the primary subgraph. 
Returns: a numpy array.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py", + "ast_data": "FunctionDef name:get_tensor arg:self arg:tensor_index arg:subgraph_index arguments arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "Powell", + "source_code": "class Powell(Benchmark):\n\n def __init__(self, dimensions=4):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-4.0] * self.N, [5.0] * self.N))\n self.global_optimum = [[0, 0, 0, 0]]\n self.fglob = 0\n\n def fun(self, x, *args):\n self.nfev += 1\n return (x[0] + 10 * x[1]) ** 2 + 5 * (x[2] - x[3]) ** 2 + (x[1] - 2 * x[2]) ** 4 + 10 * (x[0] - x[3]) ** 4", + "docstring": "Powell objective function. This class defines the Powell [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Powell}}(x) = (x_3+10x_1)^2 + 5(x_2-x_4)^2 + (x_1-2x_2)^4 + 10(x_3-x_4)^4 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: ..[1] Powell, M. An iterative method for finding stationary values of a function of several variables Computer Journal, 1962, 5, 147-151", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py", + "ast_data": "ClassDef name:Powell FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X, **params):\n check_is_fitted(self)\n X = validate_data(self, X, ensure_all_finite=False, accept_sparse=True, reset=False)\n _raise_for_params(params, self, 'predict')\n if _routing_enabled():\n predict_params = process_routing(self, 'predict', **params).estimator['predict']\n else:\n predict_params = {}\n return self.estimator_.predict(X, **predict_params)", + "docstring": "Predict using the estimated model. This is a wrapper for . Parameters ---------- X : {array-like or sparse matrix} of shape (n_samples, n_features) Input data. **params : dict Parameters routed to the method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if is set. See :ref: for more details. 
Returns ------- y : array, shape = [n_samples] or [n_samples, n_targets] Returns predicted values.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_ransac.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg arg Call Assign Call Call If Call Assign Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_strip_node_default_valued_attrs", + "source_code": "def _strip_node_default_valued_attrs(node_def):\n if node_def.op in op_name_to_function:\n return\n op_def = op_def_registry.get(node_def.op)\n if op_def is None:\n return\n attrs_to_strip = set()\n for attr_name, attr_value in node_def.attr.items():\n if _is_default_attr_value(op_def, attr_name, attr_value):\n attrs_to_strip.add(attr_name)\n for attr in attrs_to_strip:\n del node_def.attr[attr]", + "docstring": "Removes default valued attributes from a single node def.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\meta_graph.py", + "ast_data": "FunctionDef name:_strip_node_default_valued_attrs arg:node_def arguments arg If Compare Return return:no Assign Call If Compare Return return:no Assign Call For Call If Call Call For" + }, + { + "library": "numpy", + "name": "_calculate_shapes", + "source_code": "def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims):\n return [broadcast_shape + tuple((dim_sizes[dim] for dim in core_dims)) for core_dims in list_of_core_dims]", + "docstring": "Helper for calculating broadcast shapes with core dimensions.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_function_base_impl.py", + "ast_data": "FunctionDef name:_calculate_shapes arg:broadcast_shape arg:dim_sizes arg:list_of_core_dims arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "reverse_closure", + "source_code": "def reverse_closure(roots: list[Node], target_nodes: set[Node], reverse_edges_dict) -> tuple[set[Node], set[Node]]:\n closure: set[Node] = set()\n visited_target_nodes = set()\n q: collections.deque[Node] = collections.deque()\n for node in roots:\n if node is not None and node not in closure:\n closure.add(node)\n q.append(node)\n while q:\n node = q.popleft()\n reverse_edges = reverse_edges_dict[node]\n for fn in reverse_edges:\n if fn in closure or fn is None:\n continue\n if fn in target_nodes:\n visited_target_nodes.add(fn)\n continue\n closure.add(fn)\n q.append(fn)\n return (closure, visited_target_nodes)", + "docstring": "This function returns the reverse closure of the given roots, i.e. the set of nodes that can be reached from the roots by following the reverse edges of the graph. 
The target_nodes are the nodes that we want to include in the closure.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\pipelining\\_backward.py", + "ast_data": "FunctionDef name:reverse_closure arg:roots arg:target_nodes arg:reverse_edges_dict arguments arg arg arg Call Assign Call Call For If BoolOp Compare Compare Call Call While Assign Call Assign For If BoolOp Compare Compare If Compare Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "CompositeTensorGradient", + "source_code": "class CompositeTensorGradient(object, metaclass=abc.ABCMeta):\n\n @abc.abstractmethod\n def get_gradient_components(self, value):\n raise NotImplementedError(f'{type(self).__name__}.get_gradient_components()')\n\n @abc.abstractmethod\n def replace_gradient_components(self, value, component_grads):\n raise NotImplementedError(f'{type(self).__name__}.replace_gradient_components()')", + "docstring": "Class used to help compute gradients for CompositeTensors. This abstract base class defines two methods: , which returns the components of a value that should be included in gradients; and , which replaces the gradient components in a value. These methods can be used to compute the gradient of a with respect to () as follows: * If is a with = , then = . * If is a with = 'x.__composite_gradient__', then = .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor_gradient.py", + "ast_data": "ClassDef name:CompositeTensorGradient FunctionDef name:get_gradient_components arg:self arg:value arguments arg arg Raise Call Call FunctionDef name:replace_gradient_components arg:self arg:value arg:component_grads arguments arg arg arg Raise Call Call" + }, + { + "library": "pytorch", + "name": "_name_hoo_subgraph_placeholders", + "source_code": "def _name_hoo_subgraph_placeholders(gm: torch.fx.GraphModule) -> None:\n subgraph_ph_tuples: list[tuple[torch.fx.GraphModule, list[torch.fx.Node]]] = []\n for node in gm.graph.nodes:\n if node.op == 'call_function' and isinstance(node.target, torch._ops.HigherOrderOperator):\n if node.target._name == 'cond':\n _, true_graph, false_graph, cond_args = node._args\n subgraph_ph_tuples.append((getattr(gm, true_graph.target), cond_args))\n subgraph_ph_tuples.append((getattr(gm, false_graph.target), cond_args))\n elif node.target._name == 'wrap_with_set_grad_enabled':\n subgraph, phs = (node._args[1], node._args[2:])\n subgraph_ph_tuples.append((getattr(gm, subgraph.target), phs))\n elif node.target._name == 'map_impl':\n body_graph, array, args = node._args\n subgraph_ph_tuples.append((getattr(gm, body_graph.target), array + args))\n for subgraph, hoo_phs in subgraph_ph_tuples:\n name_map: dict[str, str] = {}\n for i, node in enumerate(subgraph.graph.nodes):\n if i < len(hoo_phs):\n name_map[node.name] = hoo_phs[i].name\n node.name = node.target = hoo_phs[i].name\n else:\n node.name = _rename_without_collisions(name_map, node.name, node.name)\n _name_hoo_subgraph_placeholders(subgraph)\n subgraph.recompile()", + "docstring": "Propagate placeholder names from the top-level graph into HigherOrderOp subgraphs, and handle collisions with non-placeholders by count suffixing. 
Different HOO subgraph types have different input schemas, so we first enumerate them and gather the top-level named placeholder nodes.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\utils.py", + "ast_data": "FunctionDef name:_name_hoo_subgraph_placeholders arg:gm arguments arg For If BoolOp Compare Call If Compare Assign Call Call Call Call If Compare Assign Call Call If Compare Assign Call Call For For Call If Compare Call Assign Assign Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "all", + "source_code": "@classmethod\ndef all(cls):\n return tuple(cls.__members__.values())", + "docstring": "Returns a tuple that enables all options.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\converter.py", + "ast_data": "FunctionDef name:all arg:cls arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "create_saveable_object", + "source_code": "def create_saveable_object(name, key, factory, call_with_mapped_captures):\n if call_with_mapped_captures is None:\n return factory(name=key)\n if name == trackable_utils.SERIALIZE_TO_TENSORS_NAME:\n return factory(name=key, call_with_mapped_captures=call_with_mapped_captures)\n elif is_factory_for_restored_saveable_object(factory):\n concrete_save_fn = factory.keywords['save_function']\n\n def save_fn(name):\n return call_with_mapped_captures(concrete_save_fn, [name])\n concrete_restore_fn = factory.keywords['restore_function']\n\n def restore_fn(*restored_tensors):\n return call_with_mapped_captures(concrete_restore_fn, restored_tensors)\n return factory(save_function=save_fn, restore_function=restore_fn, name=key)\n else:\n return factory(name=key)", + "docstring": "Creates a SaveableObject while potentially in a different graph. When creating the frozen saver for SavedModel, the save and restore ops are placed in a separate graph. Since RestoredSaveableObject uses tf.functions to save and restore, the function captures must be mapped to the new graph. Args: name: Name of SaveableObject factory. key: Checkpoint key of this SaveableObject. factory: Factory method for creating the SaveableObject. call_with_mapped_captures: Helper that calls a tf.function while remapping the captures. Returns: a SaveableObject.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", + "ast_data": "FunctionDef name:create_saveable_object arg:name arg:key arg:factory arg:call_with_mapped_captures arguments arg arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call If Call Assign FunctionDef name:save_fn arg:name arguments arg Return return:yes Call Assign FunctionDef name:restore_fn arguments arg Return return:yes Call Return return:yes Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "translate_y", + "source_code": "def translate_y(min_mag: float, max_mag: float) -> OperationBase:\n if min_mag != -max_mag:\n raise ValueError(f'{TranslateY.__name__} is a symmetric operation that `- min_mag == max_mag`. 
Got [{min_mag}, {max_mag}]')\n return TranslateY(None, 1.0, magnitude_range=(0.0, max_mag), symmetric_megnitude=True)", + "docstring": "Return TranslateY op.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py", + "ast_data": "FunctionDef name:translate_y arg:min_mag arg:max_mag arguments arg arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "list_all", + "source_code": "def list_all(self):\n self._ensure_entry_points_loaded()\n return [*self.list_builtin(), *self._backend_to_gui_framework]", + "docstring": "Return list of all known backends. These include built-in backends and those obtained at runtime either from entry points or explicit `` syntax. Entry points will be loaded if they haven't been already. Returns ------- list of str Backend names.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\registry.py", + "ast_data": "FunctionDef name:list_all arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "set_weight_collections", + "source_code": "def set_weight_collections(self, weight_collections):\n self._weight_collections = weight_collections", + "docstring": "Sets the weight collections for the layer. Args: weight_collections: A list of collection names to which the Variable will be added.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:set_weight_collections arg:self arg:weight_collections arguments arg arg Assign" + }, + { + "library": "pytorch", + "name": "_no_conv_bias_filter", + "source_code": "def _no_conv_bias_filter(match: 'InternalMatch', original_graph: Graph, pattern_graph: Graph) -> bool:\n return not _has_conv_bias_filter(match, original_graph, pattern_graph)", + "docstring": "Match filter for the subgraph rewriter that returns True if the conv node in the original graph does NOT have bias.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py", + "ast_data": "FunctionDef name:_no_conv_bias_filter arg:match arg:original_graph arg:pattern_graph arguments arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_shade_colors", + "source_code": "def _shade_colors(color, normals, lightsource=None):\n if lightsource is None:\n lightsource = mcolors.LightSource(azdeg=225, altdeg=19.4712)\n with np.errstate(invalid='ignore'):\n shade = normals / np.linalg.norm(normals, axis=1, keepdims=True) @ lightsource.direction\n mask = ~np.isnan(shade)\n if mask.any():\n in_norm = mcolors.Normalize(-1, 1)\n out_norm = mcolors.Normalize(0.3, 1).inverse\n\n def norm(x):\n return out_norm(in_norm(x))\n shade[~mask] = 0\n color = mcolors.to_rgba_array(color)\n alpha = color[:, 3]\n colors = norm(shade)[:, np.newaxis] * color\n colors[:, 3] = alpha\n else:\n colors = np.asanyarray(color).copy()\n return colors", + "docstring": "Shade *color* using normal vectors given by *normals*, assuming a *lightsource* (using default position if not given). 
*color* can also be an array of the same length as *normals*.", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:_shade_colors arg:color arg:normals arg:lightsource arguments arg arg arg If Compare Assign Call With Call Assign Call Assign Call If Call Assign Call Assign Call FunctionDef name:norm arg:x arguments arg Return return:yes Call Call Assign Assign Call Assign Assign Call Assign Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "OrderedDictWrapper", + "source_code": "class OrderedDictWrapper:\n\n def __init__(self, cpp_module, attr):\n self.cpp_module = cpp_module\n self.attr = attr\n\n @property\n def cpp_dict(self):\n return getattr(self.cpp_module, self.attr)\n\n def items(self):\n return self.cpp_dict.items()\n\n def keys(self):\n return self.cpp_dict.keys()\n\n def values(self):\n return self.cpp_dict.values()\n\n def __iter__(self):\n return self.cpp_dict.__iter__()\n\n def __len__(self):\n return self.cpp_dict.__len__()\n\n def __contains__(self, key):\n return self.cpp_dict.__contains__(key)\n\n def __getitem__(self, key):\n return self.cpp_dict.__getitem__(key)", + "docstring": "A wrapper around a C++ OrderedDict. It dynamically evaluates the OrderedDict getter on a bound C++ module, such that new changes on the C++ side are picked up. Otherwise accessing e.g. `` so using properties does not work.", + "type": "class", + "file_path": "pytorch\\torch\\nn\\cpp.py", + "ast_data": "ClassDef name:OrderedDictWrapper FunctionDef name:__init__ arg:self arg:cpp_module arg:attr arguments arg arg arg Assign Assign FunctionDef name:cpp_dict arg:self arguments arg Return return:yes Call FunctionDef name:items arg:self arguments arg Return return:yes Call FunctionDef name:keys arg:self arguments arg Return return:yes Call FunctionDef name:values arg:self arguments arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:__contains__ arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "_skew", + "source_code": "def _skew(data):\n data = np.ravel(data)\n mu = data.mean()\n m2 = ((data - mu) ** 2).mean()\n m3 = ((data - mu) ** 3).mean()\n return m3 / np.power(m2, 1.5)", + "docstring": "skew is third central moment / variance**(1.5)", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:_skew arg:data arguments arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "assemble", + "source_code": "def assemble(instructions: list[Instruction], firstlineno: int) -> tuple[bytes, bytes]:\n code: list[int] = []\n if sys.version_info >= (3, 11):\n lnotab, update_lineno = linetable_311_writer(firstlineno)\n num_ext = 0\n for i, inst in enumerate(instructions):\n if inst.opname == 'EXTENDED_ARG':\n inst_size = 1\n num_ext += 1\n for j in (1, 2, 3):\n if instructions[i + j].opname != 'EXTENDED_ARG':\n inst.positions = instructions[i + j].positions\n break\n else:\n inst_size = instruction_size(inst) // 2 + num_ext\n num_ext = 0\n update_lineno(inst.positions, inst_size)\n num_ext = 0\n arg = inst.arg or 0\n code.extend((inst.opcode, arg & 255))\n for _ in range(instruction_size(inst) // 2 - 1):\n code.extend((0, 
0))\n else:\n if sys.version_info < (3, 10):\n lnotab, update_lineno = lnotab_writer(firstlineno)\n else:\n lnotab, update_lineno, end = linetable_310_writer(firstlineno)\n for inst in instructions:\n if inst.starts_line is not None:\n update_lineno(inst.starts_line, len(code))\n arg = inst.arg or 0\n code.extend((inst.opcode, arg & 255))\n if sys.version_info >= (3, 10):\n end(len(code))\n return (bytes(code), bytes(lnotab))", + "docstring": "Do the opposite of dis.get_instructions()", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "FunctionDef name:assemble arg:instructions arg:firstlineno arguments arg arg If Compare Assign Call Assign For Call If Compare Assign For If Compare Assign Assign Call Assign Call Assign Assign BoolOp Call For Call Call Call If Compare Assign Call Assign Call For If Compare Call Call Assign BoolOp Call If Compare Call Call Return return:yes Call Call" + }, + { + "library": "pygame", + "name": "is_msys_mingw", + "source_code": "def is_msys_mingw():\n return False", + "docstring": "Return true if this in an MinGW/MSYS build The user may prompted for confirmation so only call this function once.", + "type": "function", + "file_path": "pygame\\buildconfig\\config.py", + "ast_data": "FunctionDef name:is_msys_mingw arguments Return return:yes" + }, + { + "library": "authlib", + "name": "update_client", + "source_code": "def update_client(self, client, client_metadata, request):\n raise NotImplementedError()", + "docstring": "Update the client in the database. Developers MUST implement this method in subclass:: def update_client(self, client, client_metadata, request): client.set_client_metadata( {**client.client_metadata, **client_metadata} ) client.save() return client :param client: the instance of OAuth client :param client_metadata: a dict of the client claims to update :param request: formatted request instance :return: client instance", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7592\\endpoint.py", + "ast_data": "FunctionDef name:update_client arg:self arg:client arg:client_metadata arg:request arguments arg arg arg arg Raise Call" + }, + { + "library": "matplotlib", + "name": "update_scalarmappable", + "source_code": "def update_scalarmappable(self):\n if not self._set_mappable_flags():\n return\n if self._A is not None:\n if self._A.ndim > 1 and (not isinstance(self, _MeshData)):\n raise ValueError('Collections can only map rank 1 arrays')\n if np.iterable(self._alpha):\n if self._alpha.size != self._A.size:\n raise ValueError(f'Data array shape, {self._A.shape} is incompatible with alpha array shape, {self._alpha.shape}. This can occur with the deprecated behavior of the \"flat\" shading option, in which a row and/or column of the data array is dropped.')\n self._alpha = self._alpha.reshape(self._A.shape)\n self._mapped_colors = self.to_rgba(self._A, self._alpha)\n if self._face_is_mapped:\n self._facecolors = self._mapped_colors\n else:\n self._set_facecolor(self._original_facecolor)\n if self._edge_is_mapped:\n self._edgecolors = self._mapped_colors\n else:\n self._set_edgecolor(self._original_edgecolor)\n self.stale = True", + "docstring": "Update colors from the scalar mappable array, if any. 
Assign colors to edges and faces based on the array and/or colors that were directly set, as appropriate.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:update_scalarmappable arg:self arguments arg If Call Return return:no If Compare If BoolOp Compare Call Raise Call If Call If Compare Raise Call Assign Call Assign Call If Assign Call If Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "get_int", + "source_code": "def get_int(self, min_int=_MIN_INT, max_int=_MAX_INT):\n return self.fdp.ConsumeIntInRange(min_int, max_int)", + "docstring": "Consume a signed integer with given constraints. Args: min_int: Minimum allowed integer. max_int: Maximum allowed integer. Returns: Consumed integer based on input bytes and constraints.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\python_fuzzing.py", + "ast_data": "FunctionDef name:get_int arg:self arg:min_int arg:max_int arguments arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_linewidth", + "source_code": "def get_linewidth(self):\n return self.patch.get_linewidth()", + "docstring": "Get the line width of the Figure rectangle.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:get_linewidth arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "BNReLU3d", + "source_code": "class BNReLU3d(nnq.BatchNorm3d):\n _FLOAT_MODULE = torch.ao.nn.intrinsic.BNReLU3d\n\n def __init__(self, num_features, eps=1e-05, momentum=0.1, device=None, dtype=None):\n super().__init__(num_features, eps=eps, momentum=momentum, device=device, dtype=dtype)\n\n def forward(self, input):\n if len(input.shape) != 5:\n raise ValueError('Input shape must be `(N, C, D, H, W)`!')\n return torch.ops.quantized.batch_norm3d_relu(input, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.scale, self.zero_point)\n\n def _get_name(self):\n return 'QuantizedBNReLU3d'\n\n @classmethod\n def from_float(cls, mod, use_precomputed_fake_quant=False):\n return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)\n\n @classmethod\n def from_reference(cls, bn_relu, output_scale, output_zero_point):\n return super().from_reference(bn_relu[0], output_scale, output_zero_point)", + "docstring": "A BNReLU3d module is a fused module of BatchNorm3d and ReLU We adopt the same interface as :class:. 
Attributes: Same as torch.ao.nn.quantized.BatchNorm3d", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\quantized\\modules\\bn_relu.py", + "ast_data": "ClassDef name:BNReLU3d Assign FunctionDef name:__init__ arg:self arg:num_features arg:eps arg:momentum arg:device arg:dtype arguments arg arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg If Compare Call Raise Call Return return:yes Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call Call FunctionDef name:from_reference arg:cls arg:bn_relu arg:output_scale arg:output_zero_point arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "set_device", + "source_code": "def set_device(device: _device_t) -> None:\n device = _get_device_index(device)\n if device >= 0:\n torch._C._cuda_setDevice(device)", + "docstring": "Set the current device. Usage of this function is discouraged in favor of :any:. In most cases it's better to use `` environmental variable. Args: device (torch.device or int): selected device. This function is a no-op if this argument is negative.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:set_device arg:device arguments arg Assign Call If Compare Call" + }, + { + "library": "scipy", + "name": "_swap", + "source_code": "@staticmethod\ndef _swap(x):\n return (x[1], x[0])", + "docstring": "swap the members of x if this is a column-oriented matrix", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_csc.py", + "ast_data": "FunctionDef name:_swap arg:x arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_glyphs_with_font", + "source_code": "def get_glyphs_with_font(self, font, s, glyph_map=None, return_new_glyphs_only=False):\n if glyph_map is None:\n glyph_map = OrderedDict()\n if return_new_glyphs_only:\n glyph_map_new = OrderedDict()\n else:\n glyph_map_new = glyph_map\n xpositions = []\n glyph_ids = []\n for item in _text_helpers.layout(s, font):\n char_id = self._get_char_id(item.ft_object, ord(item.char))\n glyph_ids.append(char_id)\n xpositions.append(item.x)\n if char_id not in glyph_map:\n glyph_map_new[char_id] = item.ft_object.get_path()\n ypositions = [0] * len(xpositions)\n sizes = [1.0] * len(xpositions)\n rects = []\n return (list(zip(glyph_ids, xpositions, ypositions, sizes)), glyph_map_new, rects)", + "docstring": "Convert string *s* to vertices and codes using the provided ttf font.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\textpath.py", + "ast_data": "FunctionDef name:get_glyphs_with_font arg:self arg:font arg:s arg:glyph_map arg:return_new_glyphs_only arguments arg arg arg arg arg If Compare Assign Call If Assign Call Assign Assign Assign For Call Assign Call Call Call Call If Compare Assign Call Assign Call Assign Call Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_losses", + "source_code": "@tf_export(v1=['losses.get_losses'])\ndef get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):\n return ops.get_collection(loss_collection, scope)", + "docstring": "Gets the list of losses from the loss_collection. Args: scope: An optional scope name for filtering the losses to return. loss_collection: Optional losses collection. 
Returns: a list of loss tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\util.py", + "ast_data": "FunctionDef name:get_losses arg:scope arg:loss_collection arguments arg arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "download_onnx_from_url", + "source_code": "def download_onnx_from_url(url: str, model_dir: Optional[str]=None, progress: bool=True, check_hash: bool=False, file_name: Optional[str]=None) -> str:\n if model_dir is None:\n hub_dir = get_dir()\n model_dir = os.path.join(hub_dir, 'checkpoints')\n os.makedirs(model_dir, exist_ok=True)\n parts = urlparse(url)\n filename = os.path.basename(parts.path)\n if file_name is not None:\n filename = file_name\n cached_file = os.path.join(model_dir, filename)\n if not os.path.exists(cached_file):\n sys.stderr.write(f'Downloading: \"{url}\" to {cached_file}\\n')\n hash_prefix = None\n if check_hash:\n r = HASH_REGEX.search(filename)\n hash_prefix = r.group(1) if r else None\n download_url_to_file(url, cached_file, hash_prefix, progress=progress)\n return cached_file", + "docstring": "Load the ONNX model at the given URL. If downloaded file is a zip file, it will be automatically decompressed. If the object is already present in , it's deserialized and returned. The default value of `~torch.hub.get_dir``` will be used if not set. Example: >>> model = download_onnx_from_url('", + "type": "function", + "file_path": "kornia\\kornia\\feature\\lightglue_onnx\\utils\\download.py", + "ast_data": "FunctionDef name:download_onnx_from_url arg:url arg:model_dir arg:progress arg:check_hash arg:file_name arguments arg arg arg arg arg If Compare Assign Call Assign Call Call Assign Call Assign Call If Compare Assign Assign Call If Call Call Assign If Assign Call Assign Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "compute_padding", + "source_code": "def compute_padding(original_size: Union[int, Tuple[int, int]], window_size: Union[int, Tuple[int, int]], stride: Optional[Union[int, Tuple[int, int]]]=None) -> FullPadType:\n original_size = cast(Tuple[int, int], _pair(original_size))\n window_size = cast(Tuple[int, int], _pair(window_size))\n if stride is None:\n stride = window_size\n stride = cast(Tuple[int, int], _pair(stride))\n remainder_vertical = (original_size[0] - window_size[0]) % stride[0]\n remainder_horizontal = (original_size[1] - window_size[1]) % stride[1]\n if remainder_vertical != 0:\n vertical_padding = stride[0] - remainder_vertical\n else:\n vertical_padding = 0\n if remainder_horizontal != 0:\n horizontal_padding = stride[1] - remainder_horizontal\n else:\n horizontal_padding = 0\n if vertical_padding % 2 == 0:\n top_padding = bottom_padding = vertical_padding // 2\n else:\n top_padding = vertical_padding // 2\n bottom_padding = ceil(vertical_padding / 2)\n if horizontal_padding % 2 == 0:\n left_padding = right_padding = horizontal_padding // 2\n else:\n left_padding = horizontal_padding // 2\n right_padding = ceil(horizontal_padding / 2)\n padding = (int(top_padding), int(bottom_padding), int(left_padding), int(right_padding))\n return cast(FullPadType, padding)", + "docstring": "Compute required padding to ensure chaining of :func: and :func: produces expected result. Args: original_size: the size of the original tensor. window_size: the size of the sliding window used while extracting patches. stride: The stride of the sliding window. Optional: if not specified, window_size will be used. 
Return: The required padding as a tuple of four ints: (top, bottom, left, right) Example: >>> image = torch.arange(12).view(1, 1, 4, 3) >>> padding = compute_padding((4,3), (3,3)) >>> out = extract_tensor_patches(image, window_size=(3, 3), stride=(3, 3), padding=padding) >>> combine_tensor_patches(out, original_size=(4, 3), window_size=(3, 3), stride=(3, 3), unpadding=padding) tensor([[[[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]]]) .. note:: This function will be implicitly used in :func: and :func: if is set to True.", + "type": "function", + "file_path": "kornia\\kornia\\contrib\\extract_patches.py", + "ast_data": "FunctionDef name:compute_padding arg:original_size arg:window_size arg:stride arguments arg arg arg Assign Call Call Assign Call Call If Compare Assign Assign Call Call Assign Assign If Compare Assign Assign If Compare Assign Assign If Compare Assign Assign Assign Call If Compare Assign Assign Assign Call Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "update_deps_for_fusions", + "source_code": "def update_deps_for_fusions(self):\n for node in self.fusions:\n fusion = self.fusions[node]\n for fused_neighbor in fusion:\n self.deps[node].update(self.deps[fused_neighbor] - fusion)\n for user in fused_neighbor.users:\n if user not in fusion:\n self.deps[user].add(node)", + "docstring": "Updates graph of dependencies so that: - nodes from the same fusion depend on the same set of outer nodes, - outer nodes depending on a fusion depend on all nodes in that fusion.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py", + "ast_data": "FunctionDef name:update_deps_for_fusions arg:self arguments arg For Assign For Call For If Compare Call" + }, + { + "library": "django", + "name": "delete_queryset", + "source_code": "def delete_queryset(self, request, queryset):\n queryset.delete()", + "docstring": "Given a queryset, delete it from the database.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:delete_queryset arg:self arg:request arg:queryset arguments arg arg arg Call" + }, + { + "library": "pytorch", + "name": "set_stream", + "source_code": "def set_stream(stream: Stream):\n if stream is None:\n return\n torch._C._mtia_setCurrentStream(stream)", + "docstring": "Set the current stream.This is a wrapper API to set the stream. Usage of this function is discouraged in favor of the ``.", + "type": "function", + "file_path": "pytorch\\torch\\mtia\\__init__.py", + "ast_data": "FunctionDef name:set_stream arg:stream arguments arg If Compare Return return:no Call" + }, + { + "library": "scipy", + "name": "_broadcast_array_shapes_remove_axis", + "source_code": "def _broadcast_array_shapes_remove_axis(arrays, axis=None):\n shapes = [arr.shape for arr in arrays]\n return _broadcast_shapes_remove_axis(shapes, axis)", + "docstring": "Broadcast shapes of arrays, dropping specified axes Given a sequence of arrays and an integer or tuple , find the shape of the broadcast result after consuming/dropping . In other words, return output shape of a typical hypothesis test on vectorized along . 
Examples -------- >>> import numpy as np >>> from scipy.stats._axis_nan_policy import _broadcast_array_shapes_remove_axis >>> a = np.zeros((5, 2, 1)) >>> b = np.zeros((9, 3)) >>> _broadcast_array_shapes_remove_axis((a, b), 1) (5, 3)", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_axis_nan_policy.py", + "ast_data": "FunctionDef name:_broadcast_array_shapes_remove_axis arg:arrays arg:axis arguments arg arg Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "solve_discrete_lyapunov", + "source_code": "@_apply_over_batch(('a', 2), ('q', 2))\ndef solve_discrete_lyapunov(a, q, method=None):\n a = np.asarray(a)\n q = np.asarray(q)\n if method is None:\n if a.shape[0] >= 10:\n method = 'bilinear'\n else:\n method = 'direct'\n meth = method.lower()\n if meth == 'direct':\n x = _solve_discrete_lyapunov_direct(a, q)\n elif meth == 'bilinear':\n x = _solve_discrete_lyapunov_bilinear(a, q)\n else:\n raise ValueError(f'Unknown solver {method}')\n return x", + "docstring": "Solves the discrete Lyapunov equation :math:. Parameters ---------- a, q : (M, M) array_like Square matrices corresponding to A and Q in the equation above respectively. Must have the same shape. method : {'direct', 'bilinear'}, optional Type of solver. If not given, chosen to be `M^2(BX+XB'=-C)B=(A-I)(A+I)^{-1}C=2(A' + I)^{-1} Q (A + I)^{-1}aqx`: >>> import numpy as np >>> from scipy import linalg >>> a = np.array([[0.2, 0.5],[0.7, -0.9]]) >>> q = np.eye(2) >>> x = linalg.solve_discrete_lyapunov(a, q) >>> x array([[ 0.70872893, 1.43518822], [ 1.43518822, -2.4266315 ]]) >>> np.allclose(a.dot(x).dot(a.T)-x, -q) True", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_solvers.py", + "ast_data": "FunctionDef name:solve_discrete_lyapunov arg:a arg:q arg:method arguments arg arg arg Assign Call Assign Call If Compare If Compare Assign Assign Assign Call If Compare Assign Call If Compare Assign Call Raise Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_constrained_layout_pads", + "source_code": "@_api.deprecated('3.6', alternative='figure.get_layout_engine().set()', pending=True)\ndef set_constrained_layout_pads(self, **kwargs):\n if isinstance(self.get_layout_engine(), ConstrainedLayoutEngine):\n self.get_layout_engine().set(**kwargs)", + "docstring": "Set padding for `constrainedlayout_guidefigure.constrained_layout.w_padfigure.constrained_layout.h_padfigure.constrained_layout.wspacefigure.constrained_layout.hspace` Height padding between subplots, expressed as a fraction of the subplot width. 
The total padding ends up being h_pad + hspace.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:set_constrained_layout_pads arg:self arguments arg arg If Call Call Call Call Call" + }, + { + "library": "scrapy", + "name": "Selector", + "source_code": "class Selector(_ParselSelector, object_ref):\n __slots__ = ['response']\n selectorlist_cls = SelectorList\n\n def __init__(self, response: TextResponse | None=None, text: str | None=None, type: str | None=None, root: Any | None=_NOT_SET, **kwargs: Any):\n if response is not None and text is not None:\n raise ValueError(f'{self.__class__.__name__}.__init__() received both response and text')\n st = _st(response, type)\n if text is not None:\n response = _response_from_text(text, st)\n if response is not None:\n text = response.text\n kwargs.setdefault('base_url', get_base_url(response))\n self.response = response\n if root is not _NOT_SET:\n kwargs['root'] = root\n super().__init__(text=text, type=st, **kwargs)", + "docstring": "An instance of :class: is a wrapper over response to select certain parts of its content. `~scrapy.http.HtmlResponse~scrapy.http.XmlResponse~scrapy.http.HtmlResponse~scrapy.http.XmlResponse~scrapy.http.TextResponse` is set, the selector type will be forced and no detection will occur.", + "type": "class", + "file_path": "scrapy\\scrapy\\selector\\unified.py", + "ast_data": "ClassDef name:Selector Assign Assign FunctionDef name:__init__ arg:self arg:response arg:text arg:type arg:root arguments arg arg arg arg arg arg If BoolOp Compare Compare Raise Call Assign Call If Compare Assign Call If Compare Assign Call Call Assign If Compare Assign Call Call" + }, + { + "library": "pytorch", + "name": "gumbel_softmax", + "source_code": "def gumbel_softmax(logits: Tensor, tau: float=1, hard: bool=False, eps: float=1e-10, dim: int=-1) -> Tensor:\n if has_torch_function_unary(logits):\n return handle_torch_function(gumbel_softmax, (logits,), logits, tau=tau, hard=hard, eps=eps, dim=dim)\n if eps != 1e-10:\n warnings.warn('`eps` parameter is deprecated and has no effect.')\n gumbels = -torch.empty_like(logits, memory_format=torch.legacy_contiguous_format).exponential_().log()\n gumbels = (logits + gumbels) / tau\n y_soft = gumbels.softmax(dim)\n if hard:\n index = y_soft.max(dim, keepdim=True)[1]\n y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)\n ret = y_hard - y_soft.detach() + y_soft\n else:\n ret = y_soft\n return ret", + "docstring": "Sample from the Gumbel-Softmax distribution (_ _) and optionally discretize. Args: logits: unnormalized log probabilities tau: non-negative scalar temperature hard: if `logitsdimhardy_hard - y_soft.detach() + y_soft` It achieves two things: - makes the output value exactly one-hot (since we add then subtract y_soft value) - makes the gradient equal to y_soft gradient (since we strip all other gradients) Examples:: >>> logits = torch.randn(20, 32) >>> # Sample soft categorical using reparametrization trick: >>> F.gumbel_softmax(logits, tau=1, hard=False) >>> # Sample hard categorical using \"Straight-through\" trick: >>> F.gumbel_softmax(logits, tau=1, hard=True) .. _Link 1: .. 
_Link 2:", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:gumbel_softmax arg:logits arg:tau arg:hard arg:eps arg:dim arguments arg arg arg arg arg If Call Return return:yes Call If Compare Call Assign Call Call Call Assign Assign Call If Assign Call Assign Call Call Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "adaptive_max_pool1d_with_indices", + "source_code": "def adaptive_max_pool1d_with_indices(input: Tensor, output_size: BroadcastingList1[int], return_indices: bool=False) -> tuple[Tensor, Tensor]:\n if has_torch_function_unary(input):\n return handle_torch_function(adaptive_max_pool1d_with_indices, (input,), input, output_size, return_indices=return_indices)\n return torch.adaptive_max_pool1d(input, output_size)", + "docstring": "adaptive_max_pool1d(input, output_size, return_indices=False) Applies a 1D adaptive max pooling over an input signal composed of several input planes. See :class: for details and output shape. Args: output_size: the target output size (single integer) return_indices: whether to return pooling indices. Default: ``", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:adaptive_max_pool1d_with_indices arg:input arg:output_size arg:return_indices arguments arg arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_download_20newsgroups", + "source_code": "def _download_20newsgroups(target_dir, cache_path, n_retries, delay):\n train_path = os.path.join(target_dir, TRAIN_FOLDER)\n test_path = os.path.join(target_dir, TEST_FOLDER)\n os.makedirs(target_dir, exist_ok=True)\n logger.info('Downloading dataset from %s (14 MB)', ARCHIVE.url)\n archive_path = _fetch_remote(ARCHIVE, dirname=target_dir, n_retries=n_retries, delay=delay)\n logger.debug('Decompressing %s', archive_path)\n with tarfile.open(archive_path, 'r:gz') as fp:\n fp.extractall(path=target_dir, filter='data')\n with suppress(FileNotFoundError):\n os.remove(archive_path)\n cache = dict(train=load_files(train_path, encoding='latin1'), test=load_files(test_path, encoding='latin1'))\n compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')\n with open(cache_path, 'wb') as f:\n f.write(compressed_content)\n shutil.rmtree(target_dir)\n return cache", + "docstring": "Download the 20 newsgroups data and stored it as a zipped pickle.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\datasets\\_twenty_newsgroups.py", + "ast_data": "FunctionDef name:_download_20newsgroups arg:target_dir arg:cache_path arg:n_retries arg:delay arguments arg arg arg arg Assign Call Assign Call Call Call Assign Call Call With Call Call With Call Call Assign Call Call Call Assign Call Call With Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "trainable_variables", + "source_code": "@property\ndef trainable_variables(self):\n return tuple(self._flatten(predicate=_is_trainable_variable, expand_composites=True))", + "docstring": "Sequence of trainable variables owned by this module and its submodules. Note: this method uses reflection to find variables on the current instance and submodules. For performance reasons you may wish to cache the result of calling this method if you don't expect the return value to change. 
Returns: A sequence of variables for the current module (sorted by attribute name) followed by variables from all submodules recursively (breadth first).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\module\\module.py", + "ast_data": "FunctionDef name:trainable_variables arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "randperm", + "source_code": "def randperm(n: int, ensure_perm: bool=True, **kwargs: Any) -> Tensor:\n perm = torch.randperm(n, **kwargs)\n if ensure_perm:\n while torch.all(torch.eq(perm, torch.arange(n, device=perm.device))):\n perm = torch.randperm(n, **kwargs)\n return perm", + "docstring": "with the ability to ensure the different arrangement generated.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\random_generator\\utils.py", + "ast_data": "FunctionDef name:randperm arg:n arg:ensure_perm arguments arg arg arg Assign Call If While Call Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "create_state", + "source_code": "def create_state(self, state_manager):\n default_num_buckets = self.categorical_column.num_buckets if self._is_v2_column else self.categorical_column._num_buckets\n num_buckets = getattr(self.categorical_column, 'num_buckets', default_num_buckets)\n embedding_shape = (num_buckets, self.dimension)\n state_manager.create_variable(self, name='embedding_weights', shape=embedding_shape, dtype=dtypes.float32, trainable=self.trainable, use_resource=True, initializer=self.initializer)", + "docstring": "Creates the embedding lookup variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:create_state arg:self arg:state_manager arguments arg arg Assign Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "UnexpectedSubprocessExitError", + "source_code": "@tf_export('__internal__.distribute.multi_process_runner.UnexpectedSubprocessExitError', v1=[])\nclass UnexpectedSubprocessExitError(RuntimeError):\n\n def __init__(self, msg, mpr_result):\n super(UnexpectedSubprocessExitError, self).__init__(msg)\n self.mpr_result = mpr_result", + "docstring": "An error indicating there is at least one subprocess with unexpected exit. When this is raised, a namedtuple object representing the multi-process run result can be retrieved by 's attribute. See for more information.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py", + "ast_data": "ClassDef name:UnexpectedSubprocessExitError FunctionDef name:__init__ arg:self arg:msg arg:mpr_result arguments arg arg arg Call Call Assign Call" + }, + { + "library": "pytorch", + "name": "__call__", + "source_code": "def __call__(self, module, inputs):\n setattr(module, self._tensor_name, self.apply_mask(module))", + "docstring": "Multiply the mask into original tensor and store the result. Multiplies the mask (stored in `apply_mask`. 
Args: module (nn.Module): module containing the tensor to prune inputs: not used.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\utils\\prune.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:module arg:inputs arguments arg arg arg Call Call" + }, + { + "library": "pytorch", + "name": "is_available", + "source_code": "def is_available() -> bool:\n acc = current_accelerator()\n if acc is None:\n return False\n mod = torch.get_device_module(acc)\n return mod.is_available()", + "docstring": "Check if the current accelerator is available at runtime: it was build, all the required drivers are available and at least one device is visible. See :ref: for details. Returns: bool: A boolean indicating if there is an available :ref:. .. note:: This API delegates to the device-specific version of . On CUDA, when the environment variable `multiprocessing-poison-fork-note`. Example:: >>> assert torch.accelerator.is_available() \"No available accelerators detected.\"", + "type": "function", + "file_path": "pytorch\\torch\\accelerator\\__init__.py", + "ast_data": "FunctionDef name:is_available arguments Assign Call If Compare Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "inv", + "source_code": "@array_function_dispatch(_unary_dispatcher)\ndef inv(a):\n a, wrap = _makearray(a)\n _assert_stacked_square(a)\n t, result_t = _commonType(a)\n signature = 'D->D' if isComplexType(t) else 'd->d'\n with errstate(call=_raise_linalgerror_singular, invalid='call', over='ignore', divide='ignore', under='ignore'):\n ainv = _umath_linalg.inv(a, signature=signature)\n return wrap(ainv.astype(result_t, copy=False))", + "docstring": "Compute the inverse of a matrix. Given a square matrix , return the matrix satisfying `aanumpy.linalgaLinAlgErroraLinAlgErrorLinAlgErrornumpy.linalg.cond` digits of accuracy on top of what would be lost to the numerical method due to loss of precision from arithmetic methods. >>> from numpy.linalg import cond >>> cond(a) np.float64(8.659885634118668e+17) # may vary It is also possible to detect ill-conditioning by inspecting the matrix's singular values directly. The ratio between the largest and the smallest singular value is the condition number: >>> from numpy.linalg import svd >>> sigma = svd(a, compute_uv=False) # Do not compute singular vectors >>> sigma.max()/sigma.min() 8.659885634118668e+17 # may vary", + "type": "function", + "file_path": "numpy\\numpy\\linalg\\_linalg.py", + "ast_data": "FunctionDef name:inv arg:a arguments arg Assign Call Call Assign Call Assign Call With Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X, y=None, copy=True):\n check_is_fitted(self)\n X = validate_data(self, X, copy=copy, dtype=FLOAT_DTYPES, reset=False)\n X -= self._x_mean\n X /= self._x_std\n x_scores = np.dot(X, self.x_rotations_)\n if y is not None:\n y = check_array(y, input_name='y', ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES)\n if y.ndim == 1:\n y = y.reshape(-1, 1)\n y -= self._y_mean\n y /= self._y_std\n y_scores = np.dot(y, self.y_rotations_)\n return (x_scores, y_scores)\n return x_scores", + "docstring": "Apply the dimension reduction. Parameters ---------- X : array-like of shape (n_samples, n_features) Samples to transform. y : array-like of shape (n_samples, n_targets), default=None Target vectors. copy : bool, default=True Whether to copy and , or perform in-place normalization. 
Returns ------- x_scores, y_scores : array-like or tuple of array-like Return if is not given, otherwise.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cross_decomposition\\_pls.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arg:y arg:copy arguments arg arg arg arg Call Assign Call Assign Call If Compare Assign Call If Compare Assign Call Assign Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "func_to_graphable", + "source_code": "def func_to_graphable(func):\n return pytree.tree_flatten(_ConstantFunction(func))", + "docstring": "Pack and flatten a function type into graphable types. This is useful for legalizing the function argument of .", + "type": "function", + "file_path": "pytorch\\torch\\_higher_order_ops\\flat_apply.py", + "ast_data": "FunctionDef name:func_to_graphable arg:func arguments arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "bottom_left", + "source_code": "@property\ndef bottom_left(self) -> torch.Tensor:\n out = self.top_left\n out[..., 1] += self.height\n return out", + "docstring": "The [x y] position of the top-left coordinate of the bounding box.", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\face_detection.py", + "ast_data": "FunctionDef name:bottom_left arg:self arguments arg Assign Return return:yes" + }, + { + "library": "django", + "name": "deferrable_sql", + "source_code": "def deferrable_sql(self):\n return ''", + "docstring": "Return the SQL to make a constraint \"initially deferred\" during a CREATE TABLE statement.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:deferrable_sql arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "identical", + "source_code": "@final\ndef identical(self, other) -> bool:\n return self.equals(other) and all((getattr(self, c, None) == getattr(other, c, None) for c in self._comparables)) and (type(self) == type(other)) and (self.dtype == other.dtype)", + "docstring": "Similar to equals, but checks that object attributes and types are also equal. Parameters ---------- other : Index The Index object you want to compare with the current Index object. Returns ------- bool If two Index objects have equal elements and same type True, otherwise False. See Also -------- Index.equals: Determine if two Index object are equal. Index.has_duplicates: Check if the Index has duplicate values. Index.is_unique: Return if the index has unique values. 
Examples -------- >>> idx1 = pd.Index([\"1\", \"2\", \"3\"]) >>> idx2 = pd.Index([\"1\", \"2\", \"3\"]) >>> idx2.identical(idx1) True >>> idx1 = pd.Index([\"1\", \"2\", \"3\"], name=\"A\") >>> idx2 = pd.Index([\"1\", \"2\", \"3\"], name=\"B\") >>> idx2.identical(idx1) False", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:identical arg:self arg:other arguments arg arg Return return:yes BoolOp Call Call Compare Call Call Compare Call Call Compare" + }, + { + "library": "scikit-learn", + "name": "stable_cumsum", + "source_code": "def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):\n out = np.cumsum(arr, axis=axis, dtype=np.float64)\n expected = np.sum(arr, axis=axis, dtype=np.float64)\n if not np.allclose(out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True):\n warnings.warn('cumsum was found to be unstable: its last element does not correspond to sum', RuntimeWarning)\n return out", + "docstring": "Use high precision for cumsum and check that final value matches sum. Warns if the final cumulative sum does not match the sum (up to the chosen tolerance). Parameters ---------- arr : array-like To be cumulatively summed as flat. axis : int, default=None Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the flattened array. rtol : float, default=1e-05 Relative tolerance, see ``. Returns ------- out : ndarray Array with the cumulative sums along the chosen axis.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\extmath.py", + "ast_data": "FunctionDef name:stable_cumsum arg:arr arg:axis arg:rtol arg:atol arguments arg arg arg arg Assign Call Assign Call If Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "construct_fast", + "source_code": "@classmethod\ndef construct_fast(cls, t, c, k, extrapolate=True, axis=0):\n self = object.__new__(cls)\n self.t, self.c, self.k = (t, c, k)\n self.extrapolate = extrapolate\n self.axis = axis\n return self", + "docstring": "Construct a spline without making checks. Accepts same parameters as the regular constructor. Input arrays and must of correct shape and dtype.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_bsplines.py", + "ast_data": "FunctionDef name:construct_fast arg:cls arg:t arg:c arg:k arg:extrapolate arg:axis arguments arg arg arg arg arg arg Assign Call Assign Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_private_register_pytree_node", + "source_code": "def _private_register_pytree_node(cls: type[Any], flatten_fn: FlattenFunc, unflatten_fn: UnflattenFunc, *, serialized_type_name: Optional[str]=None, to_dumpable_context: Optional[ToDumpableContextFn]=None, from_dumpable_context: Optional[FromDumpableContextFn]=None, flatten_with_keys_fn: Optional[FlattenWithKeysFunc]=None) -> None:\n with _NODE_REGISTRY_LOCK:\n if cls in SUPPORTED_NODES:\n warnings.warn(f'{cls} is already registered as pytree node. 
Overwriting the previous registration.')\n node_def = NodeDef(cls, flatten_fn, unflatten_fn, flatten_with_keys_fn)\n SUPPORTED_NODES[cls] = node_def\n if (to_dumpable_context is None) ^ (from_dumpable_context is None):\n raise ValueError(f'Both to_dumpable_context and from_dumpable_context for {cls} must be None or registered.')\n if serialized_type_name is None:\n serialized_type_name = NO_SERIALIZED_TYPE_NAME_FOUND\n serialize_node_def = _SerializeNodeDef(cls, serialized_type_name, to_dumpable_context, from_dumpable_context)\n SUPPORTED_SERIALIZED_TYPES[cls] = serialize_node_def\n SERIALIZED_TYPE_TO_PYTHON_TYPE[serialized_type_name] = cls", + "docstring": "This is an internal function that is used to register a pytree node type for the Python pytree only. End-users should use :func: instead.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\_pytree.py", + "ast_data": "FunctionDef name:_private_register_pytree_node arg:cls arg:flatten_fn arg:unflatten_fn arguments arg arg arg arg arg arg arg With If Compare Call Assign Call Assign If Compare Compare Raise Call If Compare Assign Assign Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "resolve", + "source_code": "def resolve(d):\n return canonicalize(d, default=current())", + "docstring": "Canonicalize with current device as default.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\device_util.py", + "ast_data": "FunctionDef name:resolve arg:d arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "validate_distributed_dataset_inputs", + "source_code": "def validate_distributed_dataset_inputs(distribution_strategy, x, y, sample_weights=None):\n x_values_list = validate_per_replica_inputs(distribution_strategy, x)\n if y is not None:\n y_values_list = validate_per_replica_inputs(distribution_strategy, y)\n else:\n y_values_list = None\n if sample_weights is not None:\n sample_weights_list = validate_per_replica_inputs(distribution_strategy, sample_weights)\n else:\n sample_weights_list = None\n return (x_values_list, y_values_list, sample_weights_list)", + "docstring": "Validate all the components of a DistributedValue Dataset input. Args: distribution_strategy: The current DistributionStrategy used to call /. x: Input Dataset DistributedValue object. For example, when we use this is a PerReplica object with a tensor for each device set in the dict. x can also be a tuple or dict. The keys of the dict should match the names of the input layers of the model. y: Target Dataset DistributedValue object. For example, when we use this is a PerReplica object with a tensor for each device set in the dict. y can also be a tuple or dict. The keys of the dict should match the names of the output layers of the model. sample_weights: Sample weights Dataset DistributedValue object. For example, when we use this is a PerReplica object with a tensor for each device set in the dict. Returns: The unwrapped values list of the x and y DistributedValues inputs. Raises: ValueError: If x and y do not have support for being evaluated as tensors. 
or if x and y contain elements that are not tensors or if x and y contain elements that have a shape or dtype mismatch.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py", + "ast_data": "FunctionDef name:validate_distributed_dataset_inputs arg:distribution_strategy arg:x arg:y arg:sample_weights arguments arg arg arg arg Assign Call If Compare Assign Call Assign If Compare Assign Call Assign Return return:yes" + }, + { + "library": "django", + "name": "__getitem__", + "source_code": "def __getitem__(self, index):\n if isinstance(index, slice):\n return [self._get_single_external(i) for i in range(*index.indices(len(self)))]\n else:\n index = self._checkindex(index)\n return self._get_single_external(index)", + "docstring": "Get the item(s) at the specified index/slice.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg If Call Return return:yes Call Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "between", + "source_code": "def between(self, left, right, inclusive: Literal['both', 'neither', 'left', 'right']='both') -> Series:\n if inclusive == 'both':\n lmask = self >= left\n rmask = self <= right\n elif inclusive == 'left':\n lmask = self >= left\n rmask = self < right\n elif inclusive == 'right':\n lmask = self > left\n rmask = self <= right\n elif inclusive == 'neither':\n lmask = self > left\n rmask = self < right\n else:\n raise ValueError(\"Inclusive has to be either string of 'both','left', 'right', or 'neither'.\")\n return lmask & rmask", + "docstring": "Return boolean Series equivalent to left >> s = pd.Series([2, 0, 4, 8, np.nan]) Boundary values are included by default: >>> s.between(1, 4) 0 True 1 False 2 True 3 False 4 False dtype: bool With set to `leftright` can be any scalar value: >>> s = pd.Series([\"Alice\", \"Bob\", \"Carol\", \"Eve\"]) >>> s.between(\"Anna\", \"Daniel\") 0 False 1 True 2 True 3 False dtype: bool", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:between arg:self arg:left arg:right arg:inclusive arguments arg arg arg arg If Compare Assign Compare Assign Compare If Compare Assign Compare Assign Compare If Compare Assign Compare Assign Compare If Compare Assign Compare Assign Compare Raise Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_major_locator", + "source_code": "def get_major_locator(self):\n return self.major.locator", + "docstring": "Get the locator of the major ticker.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_major_locator arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "from_device", + "source_code": "@classmethod\ndef from_device(cls, device: str) -> 'Mesh':\n return cls._new_object(single_device=device)", + "docstring": "Constructs a single device mesh from a device string.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py", + "ast_data": "FunctionDef name:from_device arg:cls arg:device arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "roots", + "source_code": "def roots(self, discontinuity=True, extrapolate=None):\n return self.solve(0, discontinuity, extrapolate)", + "docstring": "Find real roots of the piecewise polynomial. 
Parameters ---------- discontinuity : bool, optional Whether to report sign changes across discontinuities at breakpoints as roots. extrapolate : {bool, 'periodic', None}, optional If bool, determines whether to return roots from the polynomial extrapolated based on first and last intervals, 'periodic' works the same as False. If None (default), use . Returns ------- roots : ndarray Roots of the polynomial(s). If the PPoly object describes multiple polynomials, the return value is an object array whose each element is an ndarray containing the roots. See Also -------- PPoly.solve", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_interpolate.py", + "ast_data": "FunctionDef name:roots arg:self arg:discontinuity arg:extrapolate arguments arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "process_selected", + "source_code": "def process_selected(self, ind, xs, ys):\n pass", + "docstring": "Default \"do nothing\" implementation of the method. Parameters ---------- ind : list of int The indices of the selected vertices. xs, ys : array-like The coordinates of the selected vertices.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\lines.py", + "ast_data": "FunctionDef name:process_selected arg:self arg:ind arg:xs arg:ys arguments arg arg arg arg" + }, + { + "library": "tensorflow", + "name": "to_tensors", + "source_code": "@doc_controls.do_not_doc_inheritable\ndef to_tensors(self, value):\n return super().to_tensors(value)", + "docstring": "See tf.types.experimental.TraceType base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py", + "ast_data": "FunctionDef name:to_tensors arg:self arg:value arguments arg arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_getitem_slice", + "source_code": "def _getitem_slice(self, slobj: slice) -> Self:\n res = self._data[slobj]\n result = type(self)._simple_new(res, name=self._name, refs=self._references)\n if '_engine' in self._cache:\n reverse = slobj.step is not None and slobj.step < 0\n result._engine._update_from_sliced(self._engine, reverse=reverse)\n return result", + "docstring": "Fastpath for __getitem__ when we know we have a slice.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_getitem_slice arg:self arg:slobj arguments arg arg Assign Assign Call Call If Compare Assign BoolOp Compare Compare Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "Vbox", + "source_code": "class Vbox(Box):\n\n def __init__(self, height: float, depth: float):\n super().__init__(0.0, height, depth)", + "docstring": "A box with only height (zero width).", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py", + "ast_data": "ClassDef name:Vbox FunctionDef name:__init__ arg:self arg:height arg:depth arguments arg arg arg Call Call" + }, + { + "library": "pytorch", + "name": "is_bw", + "source_code": "@property\ndef is_bw(self):\n return torch._C._current_graph_task_id() != -1", + "docstring": "A boolean marking if this is currently running during the backward pass or not", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_tools\\mod_tracker.py", + "ast_data": "FunctionDef name:is_bw arg:self arguments arg Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "nrows", + "source_code": "def nrows(self):\n if self._nrows is not None:\n return self._nrows\n nsplits = 
tensor_shape.dimension_at_index(self._row_splits.shape, 0)\n if nsplits.value is None:\n return array_ops.shape(self._row_splits, out_type=self.dtype)[0] - 1\n else:\n return constant_op.constant(nsplits.value - 1, dtype=self.dtype)", + "docstring": "Returns the number of rows created by this . Returns: scalar integer Tensor", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:nrows arg:self arguments arg If Compare Return return:yes Assign Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, process_group, subgroup, start_localSGD_iter, post_local_gradient_allreduce=True):\n logger.info('Local SGD will be started after %s iterations', start_localSGD_iter)\n self.process_group = process_group\n self.subgroup = subgroup\n self.start_localSGD_iter = start_localSGD_iter\n self.post_local_gradient_allreduce = post_local_gradient_allreduce\n self.iter = 0", + "docstring": "Initialize state object with given parameters and log when localSGD start.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\post_localSGD_hook.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:process_group arg:subgroup arg:start_localSGD_iter arg:post_local_gradient_allreduce arguments arg arg arg arg arg Call Assign Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "validate_inference_rewrite_for_variables", + "source_code": "def validate_inference_rewrite_for_variables(graph: ops.Graph):\n if not any((x.type == 'GuaranteeConst' for x in graph.get_operations())):\n raise RuntimeError('No GuaranteeConst ops found in the graph after running tpu.rewrite_for_inference(...). Please check that you are using tf.get_variable() to create and access variables in your tpu computation.')", + "docstring": "Validates whether rewrite_for_inference() 'worked' for variables. The rewrite_for_inference() method is supposed to append GuaranteeConstOps after ReadVariableOps, but this mechanism works only if you are using tf.compat.v1.get_variable() to create and access variables in your tpu computation. This validation method can be called immediately after calling tpu.rewrite_for_inference() to check whether GuaranteeConstOps where added to the graph. Typical usages: tpu.validate_inference_rewrite_for_variables( tf.compat.v1.get_default_graph()) tpu.validate_inference_rewrite_for_variables(sess.graph) Args: graph: The graph which needs to be validated. 
Raises: RuntimeError: if validation failed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py", + "ast_data": "FunctionDef name:validate_inference_rewrite_for_variables arg:graph arguments arg If Call Compare Call Raise Call" + }, + { + "library": "authlib", + "name": "check_key_op", + "source_code": "def check_key_op(self, operation):\n key_ops = self.tokens.get('key_ops')\n if key_ops is not None and operation not in key_ops:\n raise ValueError(f'Unsupported key_op \"{operation}\"')\n if operation in self.PRIVATE_KEY_OPS and self.public_only:\n raise ValueError(f'Invalid key_op \"{operation}\" for public key')\n use = self.tokens.get('use')\n if use:\n if operation in ['sign', 'verify']:\n if use != 'sig':\n raise InvalidUseError()\n elif operation in ['decrypt', 'encrypt', 'wrapKey', 'unwrapKey']:\n if use != 'enc':\n raise InvalidUseError()", + "docstring": "Check if the given key_op is supported by this key. :param operation: key operation value, such as \"sign\", \"encrypt\". :raise: ValueError", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7517\\base_key.py", + "ast_data": "FunctionDef name:check_key_op arg:self arg:operation arguments arg arg Assign Call If BoolOp Compare Compare Raise Call If BoolOp Compare Raise Call Assign Call If If Compare If Compare Raise Call If Compare If Compare Raise Call" + }, + { + "library": "numpy", + "name": "sum", + "source_code": "def sum(self, axis=None, dtype=None, out=None):\n return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis)", + "docstring": "Returns the sum of the matrix elements, along the given axis. Refer to for full documentation. See Also -------- numpy.sum Notes ----- This is the same as , except that where an would be returned, a object is returned instead. Examples -------- >>> x = np.matrix([[1, 2], [4, 3]]) >>> x.sum() 10 >>> x.sum(axis=1) matrix([[3], [7]]) >>> x.sum(axis=1, dtype='float') matrix([[3.], [7.]]) >>> out = np.zeros((2, 1), dtype='float') >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out)) matrix([[3.], [7.]])", + "type": "method", + "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py", + "ast_data": "FunctionDef name:sum arg:self arg:axis arg:dtype arg:out arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "on_clicked", + "source_code": "def on_clicked(self, func):\n return self._observers.connect('clicked', lambda text: func(text))", + "docstring": "Connect the callback function *func* to button click events. Parameters ---------- func : callable When the button is clicked, call *func* with button label. When all buttons are cleared, call *func* with None. The callback func must have the signature:: def func(label: str | None) -> Any Return values may exist, but are ignored. 
Returns ------- A connection id, which can be used to disconnect the callback.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:on_clicked arg:self arg:func arguments arg arg Return return:yes Call arguments arg Call" + }, + { + "library": "pytorch", + "name": "_get_liveness", + "source_code": "@staticmethod\ndef _get_liveness(weakrefs: list[list[Optional[StorageWeakRefWrapper]]]) -> list[list[bool]]:\n if len(weakrefs) == 0:\n return []\n return [pytree.tree_map(is_live, outputs) for outputs in weakrefs]", + "docstring": "Maps weakrefs to true if the reference is alive and false otherwise", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", + "ast_data": "FunctionDef name:_get_liveness arg:weakrefs arguments arg If Compare Call Return return:no Return return:yes Call" + }, + { + "library": "django", + "name": "sync_apps", + "source_code": "def sync_apps(self, connection, app_labels):\n with connection.cursor() as cursor:\n tables = connection.introspection.table_names(cursor)\n all_models = [(app_config.label, router.get_migratable_models(app_config, connection.alias, include_auto_created=False)) for app_config in apps.get_app_configs() if app_config.models_module is not None and app_config.label in app_labels]\n\n def model_installed(model):\n opts = model._meta\n converter = connection.introspection.identifier_converter\n return not (converter(opts.db_table) in tables or (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))\n manifest = {app_name: list(filter(model_installed, model_list)) for app_name, model_list in all_models}\n if self.verbosity >= 1:\n self.stdout.write(' Creating tables...')\n with connection.schema_editor() as editor:\n for app_name, model_list in manifest.items():\n for model in model_list:\n if not model._meta.can_migrate(connection):\n continue\n if self.verbosity >= 3:\n self.stdout.write(' Processing %s.%s model' % (app_name, model._meta.object_name))\n if self.verbosity >= 1:\n self.stdout.write(' Creating table %s' % model._meta.db_table)\n editor.create_model(model)\n if self.verbosity >= 1:\n self.stdout.write(' Running deferred SQL...')", + "docstring": "Run the old syncdb-style operation on a list of app_labels.", + "type": "method", + "file_path": "django\\django\\core\\management\\commands\\migrate.py", + "ast_data": "FunctionDef name:sync_apps arg:self arg:connection arg:app_labels arguments arg arg arg With Call Assign Call Assign Call Call BoolOp Compare Compare FunctionDef name:model_installed arg:model arguments arg Assign Assign Return return:yes BoolOp Compare Call BoolOp Compare Call Assign Call Call If Compare Call With Call For Call For If Call If Compare Call If Compare Call Call If Compare Call" + }, + { + "library": "django", + "name": "coord_dim", + "source_code": "@property\ndef coord_dim(self):\n return capi.get_coord_dim(self.ptr)", + "docstring": "Return the coordinate dimension of the Geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:coord_dim arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "convert_mesh_to_paths", + "source_code": "@staticmethod\ndef convert_mesh_to_paths(tri):\n triangles = tri.get_masked_triangles()\n verts = np.stack((tri.x[triangles], tri.y[triangles]), axis=-1)\n return [mpath.Path(x) for x in verts]", + "docstring": "Convert a given mesh into a sequence of objects. 
This function is primarily of use to implementers of backends that do not directly support meshes.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:convert_mesh_to_paths arg:tri arguments arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "bulk_batch_size", + "source_code": "def bulk_batch_size(self, fields, objs):\n return len(objs)", + "docstring": "Return the maximum allowed batch size for the backend. The fields are the fields going to be inserted in the batch, the objs contains all the objects to be inserted.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:bulk_batch_size arg:self arg:fields arg:objs arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "clone", + "source_code": "def clone(self):\n return self.__class__(output_graph=self.output_graph_weakref(), id_to_variable=dict(self.id_to_variable), store_attr_mutations={k: dict(v) for k, v in self.store_attr_mutations.items()}, keepalive=list(self.keepalive), save_for_backward=self.save_for_backward, tensor_hooks=self.tensor_hooks)", + "docstring": "Create a shallow copy", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\side_effects.py", + "ast_data": "FunctionDef name:clone arg:self arguments arg Return return:yes Call Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "dev", + "source_code": "@property\ndef dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None", + "docstring": "The development number of the version. >>> print(Version(\"1.2.3\").dev) None >>> Version(\"1.2.3.dev1\").dev 1", + "type": "method", + "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py", + "ast_data": "FunctionDef name:dev arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_union_input_edge_with", + "source_code": "def _union_input_edge_with(input_edge, input_edge_root_qspec, edge_or_node, edge_or_node_to_qspec, shared_with_map):\n root_qspec = None\n if edge_or_node in edge_or_node_to_qspec:\n qspec = edge_or_node_to_qspec[edge_or_node]\n root_qspec = _unwrap_shared_qspec(qspec, edge_or_node_to_qspec, shared_with_map)\n if root_qspec is not None and all((_has_same_attr(root_qspec, input_edge_root_qspec, attr) for attr in ['dtype', 'is_dynamic', 'quant_min', 'quant_max', 'qscheme', 'ch_axis', 'scale', 'zero_point'])):\n _union(edge_or_node, input_edge, shared_with_map)", + "docstring": "Union input edge with another edge or node, used in implicit sharing to point the current input edge to other user edges of the producer node, or the output of producer node since these are referring to the same Tensor", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\prepare.py", + "ast_data": "FunctionDef name:_union_input_edge_with arg:input_edge arg:input_edge_root_qspec arg:edge_or_node arg:edge_or_node_to_qspec arg:shared_with_map arguments arg arg arg arg arg Assign If Compare Assign Assign Call If BoolOp Compare Call Call Call" + }, + { + "library": "scipy", + "name": "_scalar_binopt", + "source_code": "def _scalar_binopt(self, other, op):\n self.sum_duplicates()\n res = self._with_data(op(self.data, other), copy=True)\n res.eliminate_zeros()\n return res", + "docstring": "Scalar version of self._binopt, for cases in which no new nonzeros are added. 
Produces a new sparse array in canonical form.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_compressed.py", + "ast_data": "FunctionDef name:_scalar_binopt arg:self arg:other arg:op arguments arg arg arg Call Assign Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "RequestDataTooBig", + "source_code": "class RequestDataTooBig(SuspiciousOperation):\n pass", + "docstring": "The size of the request (excluding any file uploads) exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.", + "type": "class", + "file_path": "django\\django\\core\\exceptions.py", + "ast_data": "ClassDef name:RequestDataTooBig" + }, + { + "library": "tensorflow", + "name": "log_ndtr", + "source_code": "def log_ndtr(x, series_order=3, name='log_ndtr'):\n if not isinstance(series_order, int):\n raise TypeError('series_order must be a Python integer.')\n if series_order < 0:\n raise ValueError('series_order must be non-negative.')\n if series_order > 30:\n raise ValueError('series_order must be <= 30.')\n with ops.name_scope(name, values=[x]):\n x = ops.convert_to_tensor(x, name='x')\n if x.dtype.as_numpy_dtype == np.float64:\n lower_segment = LOGNDTR_FLOAT64_LOWER\n upper_segment = LOGNDTR_FLOAT64_UPPER\n elif x.dtype.as_numpy_dtype == np.float32:\n lower_segment = LOGNDTR_FLOAT32_LOWER\n upper_segment = LOGNDTR_FLOAT32_UPPER\n else:\n raise TypeError('x.dtype=%s is not supported.' % x.dtype)\n return array_ops.where_v2(math_ops.greater(x, upper_segment), -_ndtr(-x), array_ops.where_v2(math_ops.greater(x, lower_segment), math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))), _log_ndtr_lower(math_ops.minimum(x, lower_segment), series_order)))", + "docstring": "Log Normal distribution function. For details of the Normal distribution function see . This function calculates by either calling or using an asymptotic series. Specifically: - For , use the approximation based on . - For , use the existing technique and take a log. - For , we use the series approximation of erf to compute the log CDF directly. The is set based on the precision of the input: When , the asymptotic series approximation is: where is a [double-factorial]( Args: x: of type , . series_order: Positive Python . Maximum depth to evaluate the asymptotic expansion. This is the above. name: Python string. A name for the operation (default=\"log_ndtr\"). Returns: log_ndtr: with . Raises: TypeError: if is not handled. TypeError: if is a not Python ValueError: if is not in .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\special_math.py", + "ast_data": "FunctionDef name:log_ndtr arg:x arg:series_order arg:name arguments arg arg arg If Call Raise Call If Compare Raise Call If Compare Raise Call With Call Assign Call If Compare Assign Assign If Compare Assign Assign Raise Call Return return:yes Call Call Call Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "matches", + "source_code": "def matches(node, pattern):\n if isinstance(pattern, str):\n pattern = parser.parse_str(pattern)\n matcher = PatternMatcher(pattern)\n matcher.visit(node)\n return matcher.matches", + "docstring": "Basic pattern matcher for AST. The pattern may contain wildcards represented by the symbol '_'. A node matches a pattern if for every node in the tree, either there is a node of the same type in pattern, or a Name node with id='_'. 
Args: node: ast.AST pattern: ast.AST Returns: bool", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\ast_util.py", + "ast_data": "FunctionDef name:matches arg:node arg:pattern arguments arg arg If Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "stop_event_loop", + "source_code": "def stop_event_loop(self):\n self._looping = False", + "docstring": "Stop the current blocking event loop. Interactive backends need to reimplement this to match", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:stop_event_loop arg:self arguments arg Assign" + }, + { + "library": "scipy", + "name": "get_residual", + "source_code": "def get_residual(self):\n return self._data[10]", + "docstring": "Return weighted sum of squared residuals of the spline approximation. This is equivalent to:: sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py", + "ast_data": "FunctionDef name:get_residual arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "image_size", + "source_code": "@property\ndef image_size(self) -> ImageSize:\n return self.layout.image_size", + "docstring": "Return the image size.", + "type": "method", + "file_path": "kornia\\kornia\\image\\image.py", + "ast_data": "FunctionDef name:image_size arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "transform", + "source_code": "def transform(self, map_fn: Callable[[str], str]) -> 'FunctionCounts':\n counts: collections.defaultdict[str, int] = collections.defaultdict(int)\n for c, fn in self._data:\n counts[map_fn(fn)] += c\n return self._from_dict(counts, self.inclusive)", + "docstring": "Apply to all of the function names. This can be used to regularize function names (e.g. 
stripping irrelevant parts of the file path), coalesce entries by mapping multiple functions to the same name (in which case the counts are added together), etc.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py", + "ast_data": "FunctionDef name:transform arg:self arg:map_fn arguments arg arg Call For Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "acquire_lock", + "source_code": "def acquire_lock(self):\n self.locked = True\n self.locks.setdefault(self.id, threading.RLock()).acquire()", + "docstring": "Acquire an exclusive lock on the currently-loaded session data.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:acquire_lock arg:self arguments arg Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "_get_sparse_tensors", + "source_code": "def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None):\n input_tensor = inputs.get(self)\n batch_size = array_ops.shape(input_tensor)[0]\n source_dimension = self.source_column.shape[0]\n i1 = array_ops.reshape(array_ops.tile(array_ops.expand_dims(math_ops.range(0, batch_size), 1), [1, source_dimension]), (-1,))\n i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])\n bucket_indices = array_ops.reshape(input_tensor, (-1,)) + (len(self.boundaries) + 1) * i2\n indices = math_ops.cast(array_ops.transpose(array_ops_stack.stack((i1, i2))), dtypes.int64)\n dense_shape = math_ops.cast(array_ops_stack.stack([batch_size, source_dimension]), dtypes.int64)\n sparse_tensor = sparse_tensor_lib.SparseTensor(indices=indices, values=bucket_indices, dense_shape=dense_shape)\n return _CategoricalColumn.IdWeightPair(sparse_tensor, None)", + "docstring": "Converts dense inputs to SparseTensor so downstream code can use it.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_get_sparse_tensors arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg Assign Call Assign Call Assign Assign Call Call Call Call Assign Call Call Assign Call Call Assign Call Call Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "greetUser", + "source_code": "@cherrypy.expose\ndef greetUser(self, name=None):\n if name:\n return \"Hey %s, what's up?\" % name\n elif name is None:\n return 'Please enter your name here.'\n else:\n return 'No, really, enter your name here.'", + "docstring": "Render a greeting or form on `` URI.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut03_get_and_post.py", + "ast_data": "FunctionDef name:greetUser arg:self arg:name arguments arg arg If Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "make_padding_config", + "source_code": "def make_padding_config(padding_config: PaddingConfig | Sequence[tuple[int, int, int]]) -> PaddingConfig:\n if not isinstance(padding_config, PaddingConfig):\n triples = padding_config\n padding_config = PaddingConfig()\n for lo, hi, interior in triples:\n dimension = PaddingConfigDimension()\n dimension.edge_padding_low = lo\n dimension.edge_padding_high = hi\n dimension.interior_padding = interior\n padding_config.dimensions.append(dimension)\n return padding_config", + "docstring": "Create PaddingConfig proto from list of triples of integers. 
Args: padding_config: either a PaddingConfig or a list of integer triples (edge_padding_low, edge_padding_high, interior_padding) representing the configuration of the padding operation. Returns: A object.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py", + "ast_data": "FunctionDef name:make_padding_config arg:padding_config arguments arg If Call Assign Assign Call For Assign Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_and_check_qmin_qmax", + "source_code": "def _get_and_check_qmin_qmax(dtype, quant_min, quant_max):\n if dtype in FP8_TYPES:\n quant_min_lower_bound, quant_max_upper_bound = (torch.finfo(dtype).min, torch.finfo(dtype).max)\n elif dtype not in _DTYPE_TO_QVALUE_BOUNDS:\n raise ValueError(f'Unsupported dtype: {dtype}')\n else:\n quant_min_lower_bound, quant_max_upper_bound = _DTYPE_TO_QVALUE_BOUNDS[dtype]\n if quant_min is None:\n quant_min = quant_min_lower_bound\n if quant_max is None:\n quant_max = quant_max_upper_bound\n assert quant_min >= quant_min_lower_bound, f'quant_min out of bound for dtype, quant_min_lower_bound: {quant_min_lower_bound} quant_min: {quant_min}'\n assert quant_max <= quant_max_upper_bound, f'quant_max out of bound for dtype, quant_max_upper_bound: {quant_max_upper_bound} quant_max: {quant_max}'\n return (quant_min, quant_max)", + "docstring": "Get quant_min and quant_max args based on dtype and also verify that they are within the range of possible quant_min/quant_max for dtype", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_affine_quantization.py", + "ast_data": "FunctionDef name:_get_and_check_qmin_qmax arg:dtype arg:quant_min arg:quant_max arguments arg arg arg If Compare Assign Call Call If Compare Raise Call Assign If Compare Assign If Compare Assign Compare Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "_is_known_signed_by_dtype", + "source_code": "def _is_known_signed_by_dtype(dt):\n return {dtypes.float16: True, dtypes.float32: True, dtypes.float64: True, dtypes.int8: True, dtypes.int16: True, dtypes.int32: True, dtypes.int64: True}.get(dt.base_dtype, False)", + "docstring": "Helper returning True if dtype is known to be signed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py", + "ast_data": "FunctionDef name:_is_known_signed_by_dtype arg:dt arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "x_grid_barrier", + "source_code": "@triton.jit\ndef x_grid_barrier(sem):\n tl.debug_barrier()\n one_i32 = 1\n one_u32 = one_i32.to(tl.uint32)\n expected = tl.num_programs(0).to(tl.uint32)\n if tl.program_id(0) == 0:\n nb = 2147483648 - (expected - one_u32)\n else:\n nb = one_u32\n old_arrive = tl.atomic_add(sem, nb, sem='release')\n bar_flipped = False\n while not bar_flipped:\n current_arrive = tl.atomic_add(sem, 0, sem='acquire')\n bar_flipped = (old_arrive ^ current_arrive) & 2147483648 != 0\n tl.debug_barrier()", + "docstring": "Wait for all other thread blocks in grid sharing same y/z program_id to reach this barrier before returning. Args: sem: an uint32 semaphores, zero or 0x80000000 initialized. 
Must be unique to each y/z program ID.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_helpers.py", + "ast_data": "FunctionDef name:x_grid_barrier arg:sem arguments arg Call Assign Assign Call Assign Call Call If Compare Call Assign Assign Assign Call Assign While Assign Call Assign Compare Call" + }, + { + "library": "kornia", + "name": "normalize_min_max", + "source_code": "def normalize_min_max(x: Tensor, min_val: float=0.0, max_val: float=1.0, eps: float=1e-06) -> Tensor:\n if not isinstance(x, Tensor):\n raise TypeError(f'data should be a tensor. Got: {type(x)}.')\n if not isinstance(min_val, float):\n raise TypeError(f\"'min_val' should be a float. Got: {type(min_val)}.\")\n if not isinstance(max_val, float):\n raise TypeError(f\"'b' should be a float. Got: {type(max_val)}.\")\n if len(x.shape) < 3:\n raise ValueError(f'Input shape must be at least a 3d tensor. Got: {x.shape}.')\n shape = x.shape\n B, C = (shape[0], shape[1])\n x_min: Tensor = x.view(B, C, -1).min(-1)[0].view(B, C, 1)\n x_max: Tensor = x.view(B, C, -1).max(-1)[0].view(B, C, 1)\n x_out: Tensor = (max_val - min_val) * (x.view(B, C, -1) - x_min) / (x_max - x_min + eps) + min_val\n return x_out.view(shape)", + "docstring": "Normalise an image/video tensor by MinMax and re-scales the value between a range. The data is normalised using the following formulation: .. math:: y_i = (b - a) * \\frac{x_i - \\text{min}(x)}{\\text{max}(x) - \\text{min}(x)} + a where :math: is :math: and :math: is :math:. Args: x: The image tensor to be normalised with shape :math:. min_val: The minimum value for the new range. max_val: The maximum value for the new range. eps: Float number to avoid zero division. Returns: The normalised image tensor with same shape as input :math:. Example: >>> x = torch.rand(1, 5, 3, 3) >>> x_norm = normalize_min_max(x, min_val=-1., max_val=1.) >>> x_norm.min() tensor(-1.) >>> x_norm.max() tensor(1.0000)", + "type": "function", + "file_path": "kornia\\kornia\\enhance\\normalize.py", + "ast_data": "FunctionDef name:normalize_min_max arg:x arg:min_val arg:max_val arg:eps arguments arg arg arg arg If Call Raise Call Call If Call Raise Call Call If Call Raise Call Call If Compare Call Raise Call Assign Assign Call Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "stop_iteration", + "source_code": "def stop_iteration(self):\n return self.bvars.get('force_stop', False) or self.ivars['iterations_left'] == 0 or self.ivars['converged_count'] >= self.iparams['k']", + "docstring": "Return True to stop iterations. 
Note that tracker (if defined) can force-stop iterations by setting ``.", + "type": "method", + "file_path": "pytorch\\torch\\_lobpcg.py", + "ast_data": "FunctionDef name:stop_iteration arg:self arguments arg Return return:yes BoolOp Call Compare Compare" + }, + { + "library": "pytorch", + "name": "is_replicated", + "source_code": "def is_replicated(self) -> bool:\n return all((placement.is_replicate() for placement in self.placements))", + "docstring": "return True if the current DTensorSpec replicates on all mesh dims (devices)", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_dtensor_spec.py", + "ast_data": "FunctionDef name:is_replicated arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, axis=None, use_rmin=True, *, apply_theta_transforms=True):\n super().__init__()\n self._axis = axis\n self._use_rmin = use_rmin\n self._apply_theta_transforms = apply_theta_transforms\n if apply_theta_transforms:\n _apply_theta_transforms_warn()", + "docstring": "Parameters ---------- axis : , optional Axis associated with this transform. This is used to get the minimum radial limit. use_rmin : , optional If ``, add the minimum radial axis limit after transforming from Cartesian coordinates. *axis* must also be specified for this to take effect.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:axis arg:use_rmin arguments arg arg arg arg Call Call Assign Assign Assign If Call" + }, + { + "library": "scikit-learn", + "name": "_get_sample_count", + "source_code": "def _get_sample_count(self, n_samples):\n if isinstance(self.subsample, numbers.Integral):\n if self.subsample < n_samples:\n return self.subsample\n return n_samples\n elif isinstance(self.subsample, numbers.Real):\n return ceil(n_samples * self.subsample)\n return n_samples", + "docstring": "Compute the number of samples as an integer.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\inspection\\_plot\\partial_dependence.py", + "ast_data": "FunctionDef name:_get_sample_count arg:self arg:n_samples arguments arg arg If Call If Compare Return return:yes Return return:yes If Call Return return:yes Call Return return:yes" + }, + { + "library": "pygame", + "name": "get_init", + "source_code": "def get_init():\n return _get_init()", + "docstring": "get_init() -> bool true if the font module is initialized", + "type": "function", + "file_path": "pygame\\src_py\\ftfont.py", + "ast_data": "FunctionDef name:get_init arguments Return return:yes Call" + }, + { + "library": "pandas", + "name": "tz", + "source_code": "@property\ndef tz(self) -> tzinfo:\n return self._tz", + "docstring": "The timezone. See Also -------- DatetimeTZDtype.unit : Retrieves precision of the datetime data. Examples -------- >>> from zoneinfo import ZoneInfo >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo(\"America/Los_Angeles\")) >>> dtype.tz zoneinfo.ZoneInfo(key='America/Los_Angeles')", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", + "ast_data": "FunctionDef name:tz arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "cast", + "source_code": "def cast(self, value, cast_context) -> Any:\n del cast_context\n assert value == self.placeholder_value(PlaceholderContext()), f'Can not cast {value!r} to type {self!r}'\n return value", + "docstring": "Cast value to this type. 
Args: value: An input value belonging to this TraceType. cast_context: A context reserved for internal/future usage. Returns: The value casted to this TraceType. Raises: AssertionError: When _cast is not overloaded in subclass, the value is returned directly, and it should be the same to self.placeholder_value().", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py", + "ast_data": "FunctionDef name:cast arg:self arg:value arg:cast_context arguments arg arg arg Compare Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "empty", + "source_code": "@classmethod\ndef empty(cls, n):\n empty_fun = np.empty(0)\n empty_jac = np.empty((0, n))\n empty_hess = sps.csr_array((n, n))\n\n def fun(x):\n return (empty_fun, empty_fun)\n\n def jac(x):\n return (empty_jac, empty_jac)\n\n def hess(x, v_eq, v_ineq):\n return empty_hess\n return cls(0, 0, fun, jac, hess, np.empty(0, dtype=np.bool_))", + "docstring": "Create an \"empty\" instance. This \"empty\" instance is required to allow working with unconstrained problems as if they have some constraints.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\canonical_constraint.py", + "ast_data": "FunctionDef name:empty arg:cls arg:n arguments arg arg Assign Call Assign Call Assign Call FunctionDef name:fun arg:x arguments arg Return return:yes FunctionDef name:jac arg:x arguments arg Return return:yes FunctionDef name:hess arg:x arg:v_eq arg:v_ineq arguments arg arg arg Return return:yes Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_check_finite", + "source_code": "def _check_finite(array: Array, xp: ModuleType) -> None:\n if not xp.all(xp.isfinite(array)):\n msg = 'array must not contain infs or NaNs'\n raise ValueError(msg)", + "docstring": "Check for NaNs or Infs.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_array_api.py", + "ast_data": "FunctionDef name:_check_finite arg:array arg:xp arguments arg arg If Call Call Assign Raise Call" + }, + { + "library": "tensorflow", + "name": "test_step", + "source_code": "def test_step(self, data):\n data = data_adapter.expand_1d(data)\n x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)\n y_pred = self(x, training=False)\n self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)\n self.compiled_metrics.update_state(y, y_pred, sample_weight)\n return_metrics = {}\n for metric in self.metrics:\n result = metric.result()\n if isinstance(result, dict):\n return_metrics.update(result)\n else:\n return_metrics[metric.name] = result\n return return_metrics", + "docstring": "The logic for one evaluation step. This method can be overridden to support custom evaluation logic. This method is called by . This function should contain the mathematical logic for one step of evaluation. This typically includes the forward pass, loss calculation, and metrics updates. Configuration details for *how* this logic is run (e.g. and settings), should be left to , which can also be overridden. Args: data: A nested structure of s. Returns: A containing values that will be passed to . 
Typically, the values of the 's metrics are returned.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:test_step arg:self arg:data arguments arg arg Assign Call Assign Call Assign Call Call Call Assign For Assign Call If Call Call Assign Return return:yes" + }, + { + "library": "scrapy", + "name": "StrictOriginPolicy", + "source_code": "class StrictOriginPolicy(ReferrerPolicy):\n name: str = POLICY_STRICT_ORIGIN\n\n def referrer(self, response_url: str, request_url: str) -> str | None:\n if self.tls_protected(response_url) and self.potentially_trustworthy(request_url) or not self.tls_protected(response_url):\n return self.origin_referrer(response_url)\n return None", + "docstring": "The \"strict-origin\" policy sends the ASCII serialization of the origin of the request client when making requests: - from a TLS-protected environment settings object to a potentially trustworthy URL, and - from non-TLS-protected environment settings objects to any origin. Requests from TLS-protected request clients to non- potentially trustworthy URLs, on the other hand, will contain no referrer information. A Referer HTTP header will not be sent.", + "type": "class", + "file_path": "scrapy\\scrapy\\spidermiddlewares\\referer.py", + "ast_data": "ClassDef name:StrictOriginPolicy FunctionDef name:referrer arg:self arg:response_url arg:request_url arguments arg arg arg If BoolOp BoolOp Call Call Call Return return:yes Call Return return:no" + }, + { + "library": "pytorch", + "name": "_sync_module_params_and_buffers", + "source_code": "def _sync_module_params_and_buffers(module: nn.Module, params: list[nn.Parameter], process_group: dist.ProcessGroup) -> None:\n module_states: list[torch.Tensor] = []\n for buffer in module.buffers():\n if not getattr(buffer, FSDP_SYNCED, False):\n setattr(buffer, FSDP_SYNCED, True)\n detached_buffer = buffer.detach()\n if is_traceable_wrapper_subclass(detached_buffer):\n attrs, _ = detached_buffer.__tensor_flatten__()\n inner_buffers = [getattr(detached_buffer, attr) for attr in attrs]\n module_states.extend(inner_buffers)\n else:\n module_states.append(detached_buffer)\n for param in params:\n detached_param = param.detach()\n if is_traceable_wrapper_subclass(detached_param):\n attrs, _ = detached_param.__tensor_flatten__()\n inner_params = [getattr(detached_param, attr) for attr in attrs]\n module_states.extend(inner_params)\n else:\n module_states.append(detached_param)\n _check_module_states_for_sync_module_states(module_states)\n _sync_params_and_buffers(process_group, module_states, PARAM_BROADCAST_BUCKET_SIZE, src=0)", + "docstring": "Synchronize module states (i.e. 
parameters `` has been set.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py", + "ast_data": "FunctionDef name:_sync_module_params_and_buffers arg:module arg:params arg:process_group arguments arg arg arg For Call If Call Call Assign Call If Call Assign Call Assign Call Call Call For Assign Call If Call Assign Call Assign Call Call Call Call Call" + }, + { + "library": "numpy", + "name": "getnm", + "source_code": "def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True):\n p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n nm_output, nm_err = p.communicate()\n if p.returncode != 0:\n raise RuntimeError('failed to run \"%s\": \"%s\"' % (' '.join(nm_cmd), nm_err))\n return nm_output", + "docstring": "Returns the output of nm_cmd via a pipe. nm_output = getnm(nm_cmd = 'nm -Cs py_lib')", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\lib2def.py", + "ast_data": "FunctionDef name:getnm arg:nm_cmd arg:shell arguments arg arg Assign Call Assign Call If Compare Raise Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_dcg_sample_scores", + "source_code": "def _dcg_sample_scores(y_true, y_score, k=None, log_base=2, ignore_ties=False):\n discount = 1 / (np.log(np.arange(y_true.shape[1]) + 2) / np.log(log_base))\n if k is not None:\n discount[k:] = 0\n if ignore_ties:\n ranking = np.argsort(y_score)[:, ::-1]\n ranked = y_true[np.arange(ranking.shape[0])[:, np.newaxis], ranking]\n cumulative_gains = discount.dot(ranked.T)\n else:\n discount_cumsum = np.cumsum(discount)\n cumulative_gains = [_tie_averaged_dcg(y_t, y_s, discount_cumsum) for y_t, y_s in zip(y_true, y_score)]\n cumulative_gains = np.asarray(cumulative_gains)\n return cumulative_gains", + "docstring": "Compute Discounted Cumulative Gain. Sum the true scores ranked in the order induced by the predicted scores, after applying a logarithmic discount. This ranking metric yields a high value if true labels are ranked high by `None`, use all outputs. log_base : float, default=2 Base of the logarithm used for the discount. A low value means a sharper discount (top results are more important). ignore_ties : bool, default=False Assume that there are no ties in y_score (which is likely to be the case if y_score is continuous) for efficiency gains. Returns ------- discounted_cumulative_gain : ndarray of shape (n_samples,) The DCG score for each sample. 
See Also -------- ndcg_score : The Discounted Cumulative Gain divided by the Ideal Discounted Cumulative Gain (the DCG obtained for a perfect ranking), in order to have a score between 0 and 1.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\_ranking.py", + "ast_data": "FunctionDef name:_dcg_sample_scores arg:y_true arg:y_score arg:k arg:log_base arg:ignore_ties arguments arg arg arg arg arg Assign Call Call Call If Compare Assign If Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_deserialize_nested_config", + "source_code": "def _deserialize_nested_config(deserialize_fn, config):\n\n def _is_single_object(obj):\n if isinstance(obj, dict) and 'class_name' in obj:\n return True\n if isinstance(obj, str):\n return True\n return False\n if config is None:\n return None\n if _is_single_object(config):\n return deserialize_fn(config)\n elif isinstance(config, dict):\n return {k: _deserialize_nested_config(deserialize_fn, v) for k, v in config.items()}\n elif isinstance(config, (tuple, list)):\n return [_deserialize_nested_config(deserialize_fn, obj) for obj in config]\n raise ValueError('Saved configuration not understood.')", + "docstring": "Deserializes arbitrary Keras using .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saving_utils.py", + "ast_data": "FunctionDef name:_deserialize_nested_config arg:deserialize_fn arg:config arguments arg arg FunctionDef name:_is_single_object arg:obj arguments arg If BoolOp Call Compare Return return:yes If Call Return return:yes Return return:yes If Compare Return return:no If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes Call Raise Call" + }, + { + "library": "tensorflow", + "name": "PhysicalDevice", + "source_code": "@tf_export('config.PhysicalDevice')\nclass PhysicalDevice(collections.namedtuple('PhysicalDevice', ['name', 'device_type'])):\n pass", + "docstring": "Abstraction for a locally visible physical device. TensorFlow can utilize various devices such as the CPU or multiple GPUs for computation. Before initializing a local device for use, the user can customize certain properties of the device such as it's visibility or memory configuration. Once a visible is initialized one or more objects are created. Use to configure the visibility of a physical device and to configure multiple objects for a . This is useful when separation between models is needed or to simulate a multi-device environment. Fields: name: Unique identifier for device. 
device_type: String declaring the type of device such as \"CPU\" or \"GPU\".", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "ClassDef name:PhysicalDevice Call Call" + }, + { + "library": "tensorflow", + "name": "_append_composite_tensor", + "source_code": "def _append_composite_tensor(target, to_append):\n if type(target) is not type(to_append):\n raise RuntimeError('Unable to concatenate %s and %s' % (type(target), type(to_append)))\n if isinstance(target, sparse_tensor.SparseTensor):\n return sparse_ops.sparse_concat(sp_inputs=[target, to_append], axis=0)\n elif isinstance(target, ragged_tensor.RaggedTensor):\n return array_ops.concat([target, to_append], axis=0)\n elif isinstance(target, sparse_tensor.SparseTensorValue):\n return _append_sparse_tensor_value(target, to_append)\n elif isinstance(target, ragged_tensor_value.RaggedTensorValue):\n return _append_ragged_tensor_value(target, to_append)\n else:\n raise RuntimeError('Attempted to concatenate unsupported object %s.' % type(target))", + "docstring": "Helper function to append composite tensors to each other in the 0 axis. In order to support batching within a fit/evaluate/predict call, we need to be able to aggregate within a CompositeTensor. Unfortunately, the CT API currently does not make this easy - especially in V1 mode, where we're working with CompositeTensor Value objects that have no connection with the CompositeTensors that created them. Args: target: CompositeTensor or CompositeTensor value object that will be appended to. to_append: CompositeTensor or CompositeTensor value object to append to. 'target'. Returns: A CompositeTensor or CompositeTensor value object. Raises: RuntimeError: if concatenation is not possible.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "FunctionDef name:_append_composite_tensor arg:target arg:to_append arguments arg arg If Compare Call Call Raise Call Call Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Raise Call Call" + }, + { + "library": "matplotlib", + "name": "to_hex", + "source_code": "def to_hex(c, keep_alpha=False):\n c = to_rgba(c)\n if not keep_alpha:\n c = c[:3]\n return '#' + ''.join((format(round(val * 255), '02x') for val in c))", + "docstring": "Convert *c* to a hex color. Parameters ---------- c : :mpltype: or keep_alpha : bool, default: False If False, use the `` hex color string", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:to_hex arg:c arg:keep_alpha arguments arg arg Assign Call If Assign Return return:yes Call Call Call" + }, + { + "library": "kornia", + "name": "matrix", + "source_code": "def matrix(self) -> Tensor:\n rt = concatenate((self.r.matrix(), self.t.data[..., None]), -1)\n rt_4x4 = pad(rt, (0, 0, 0, 1))\n rt_4x4[..., -1, -1] = 1.0\n return rt_4x4", + "docstring": "Return the matrix representation of shape :math:. 
Example: >>> s = Se3(So3.identity(), torch.ones(3)) >>> s.matrix() tensor([[1., 0., 0., 1.], [0., 1., 0., 1.], [0., 0., 1., 1.], [0., 0., 0., 1.]], grad_fn=)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py", + "ast_data": "FunctionDef name:matrix arg:self arguments arg Assign Call Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "conv_output_length", + "source_code": "def conv_output_length(input_length, filter_size, padding, stride, dilation=1):\n if input_length is None:\n return None\n assert padding in {'same', 'valid', 'full', 'causal'}\n dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)\n if padding in ['same', 'causal']:\n output_length = input_length\n elif padding == 'valid':\n output_length = input_length - dilated_filter_size + 1\n elif padding == 'full':\n output_length = input_length + dilated_filter_size - 1\n return (output_length + stride - 1) // stride", + "docstring": "Determines output length of a convolution given input length. Args: input_length: integer. filter_size: integer. padding: one of \"same\", \"valid\", \"full\", \"causal\" stride: integer. dilation: dilation rate, integer. Returns: The output length (integer).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\conv_utils.py", + "ast_data": "FunctionDef name:conv_output_length arg:input_length arg:filter_size arg:padding arg:stride arg:dilation arguments arg arg arg arg arg If Compare Return return:no Compare Assign If Compare Assign If Compare Assign If Compare Assign Return return:yes" + }, + { + "library": "pandas", + "name": "read_feather", + "source_code": "@doc(storage_options=_shared_docs['storage_options'])\ndef read_feather(path: FilePath | ReadBuffer[bytes], columns: Sequence[Hashable] | None=None, use_threads: bool=True, storage_options: StorageOptions | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default) -> DataFrame:\n import_optional_dependency('pyarrow')\n from pyarrow import feather\n import pandas.core.arrays.arrow.extension_types\n check_dtype_backend(dtype_backend)\n with get_handle(path, 'rb', storage_options=storage_options, is_text=False) as handles:\n if dtype_backend is lib.no_default and (not using_string_dtype()):\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', 'make_block is deprecated', DeprecationWarning)\n return feather.read_feather(handles.handle, columns=columns, use_threads=bool(use_threads))\n pa_table = feather.read_table(handles.handle, columns=columns, use_threads=bool(use_threads))\n return arrow_table_to_pandas(pa_table, dtype_backend=dtype_backend)", + "docstring": "Load a feather-format object from the file path. Feather is particularly useful for scenarios that require efficient serialization and deserialization of tabular data. It supports schema preservation, making it a reliable choice for use cases such as sharing data between Python and R, or persisting intermediate results during data processing pipelines. This method provides additional flexibility with options for selective column reading, thread parallelism, and choosing the backend for data types. Parameters ---------- path : str, path object, or file-like object String, path object (implementing `DataFrameDataFrameArrowDtypeDataFrame` .. versionadded:: 2.0 Returns ------- type of object stored in file DataFrame object stored in the file. See Also -------- read_csv : Read a comma-separated values (csv) file into a pandas DataFrame. 
read_excel : Read an Excel file into a pandas DataFrame. read_spss : Read an SPSS file into a pandas DataFrame. read_orc : Load an ORC object into a pandas DataFrame. read_sas : Read SAS file into a pandas DataFrame. Examples -------- >>> df = pd.read_feather(\"path/to/file.feather\") # doctest: +SKIP", + "type": "function", + "file_path": "pandas\\pandas\\io\\feather_format.py", + "ast_data": "FunctionDef name:read_feather arg:path arg:columns arg:use_threads arg:storage_options arg:dtype_backend arguments arg arg arg arg arg Call Call With Call If BoolOp Compare Call With Call Call Return return:yes Call Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "optim_state_dict_to_load", + "source_code": "@staticmethod\ndef optim_state_dict_to_load(model: torch.nn.Module, optim: torch.optim.Optimizer, optim_state_dict: dict[str, Any], is_named_optimizer: bool=False, load_directly: bool=False, group: Optional[dist.ProcessGroup]=None) -> dict[str, Any]:\n state_dict_settings = FullyShardedDataParallel.get_state_dict_type(model)\n result = FullyShardedDataParallel._optim_state_dict_to_load_impl(optim_state_dict=optim_state_dict, model=model, optim_input=None, optim=optim, full_state_dict=state_dict_settings.state_dict_type == StateDictType.FULL_STATE_DICT, rank0_only=getattr(state_dict_settings.optim_state_dict_config, 'rank0_only', False), is_named_optimizer=is_named_optimizer, group=group)\n if load_directly:\n optim.load_state_dict(result)\n return result", + "docstring": "Convert an optimizer state-dict so that it can be loaded into the optimizer associated with the FSDP model. Given a `optim_state_dictFullyShardedDataParallel`)", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py", + "ast_data": "FunctionDef name:optim_state_dict_to_load arg:model arg:optim arg:optim_state_dict arg:is_named_optimizer arg:load_directly arg:group arguments arg arg arg arg arg arg Assign Call Assign Call Compare Call If Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_reduction_params", + "source_code": "def _get_reduction_params(block_size, input_size):\n assert len(block_size) == len(input_size)\n shape_for_reduction = []\n reduction_dims = []\n cur_dim = 0\n for i in range(len(block_size)):\n if block_size[i] != input_size[i] and block_size[i] > 1:\n assert input_size[i] % block_size[i] == 0, f'Expecting input size at {i} dimension: {input_size[i]} to be divisible by block_size at {i} dimension: {block_size[i]}'\n shape_for_reduction.append(input_size[i] // block_size[i])\n shape_for_reduction.append(block_size[i])\n reduction_dims.append(cur_dim + 1)\n cur_dim += 2\n else:\n shape_for_reduction.append(input_size[i])\n if block_size[i] != 1:\n reduction_dims.append(cur_dim)\n cur_dim += 1\n return (shape_for_reduction, reduction_dims)", + "docstring": "Given block_size and input size find the parameters for reduction: Output: shape_for_reduction: the shape we use to input to prepare it for reduction reduction_dims: the dims we'll do reduction over Example:: Input: block_size: (3, 3, 2, 10) input_size: (3, 3, 10, 10) Output: shape_for_reduction: (3, 3, 5, 2, 10) reduction_dim: [0, 1, 3, 4]", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_affine_quantization.py", + "ast_data": "FunctionDef name:_get_reduction_params arg:block_size arg:input_size arguments arg arg Compare Call Call Assign Assign Assign For Call Call If BoolOp Compare Compare Compare Call Call Call 
Call If Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "from_config", + "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n _check_config_keys(config, cls._fields)\n kwargs = _standardize_and_copy_config(config)\n kwargs['normalizer_fn'] = serialization._deserialize_keras_object(config['normalizer_fn'], custom_objects=custom_objects)\n kwargs['dtype'] = dtypes.as_dtype(config['dtype'])\n return cls(**kwargs)", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_swappable_settings_name", + "source_code": "@functools.cache\ndef get_swappable_settings_name(self, to_string):\n to_string = to_string.lower()\n for model in self.get_models(include_swapped=True):\n swapped = model._meta.swapped\n if swapped and swapped.lower() == to_string:\n return model._meta.swappable\n if model._meta.swappable and model._meta.label_lower == to_string:\n return model._meta.swappable\n return None", + "docstring": "For a given model string (e.g. \"auth.User\"), return the name of the corresponding settings name if it refers to a swappable model. If the referred model is not swappable, return None. This method is decorated with @functools.cache because it's performance critical when it comes to migrations. Since the swappable settings don't change after Django has loaded the settings, there is no reason to get the respective settings attribute over and over again.", + "type": "method", + "file_path": "django\\django\\apps\\registry.py", + "ast_data": "FunctionDef name:get_swappable_settings_name arg:self arg:to_string arguments arg arg Assign Call For Call Assign If BoolOp Compare Call Return return:yes If BoolOp Compare Return return:yes Return return:no" + }, + { + "library": "sphinx", + "name": "update_context", + "source_code": "def update_context(self) -> None:\n registry = self.env._registry\n self.context['packages'] = registry.latex_packages\n self.context['packages_after_hyperref'] = registry.latex_packages_after_hyperref", + "docstring": "Update template variables for .tex file just before writing.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\latex\\__init__.py", + "ast_data": "FunctionDef name:update_context arg:self arguments arg Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "_list_function_deps", + "source_code": "def _list_function_deps(fdef, library_function_names, library_gradient_names):\n deps = set()\n for node_def in fdef.node_def:\n grad_op_type = _get_gradient_op_type(node_def)\n if node_def.op in library_function_names:\n deps.add(node_def.op)\n elif grad_op_type and grad_op_type in library_gradient_names:\n deps.add(library_gradient_names[grad_op_type])\n else:\n for _, attr_value in node_def.attr.items():\n if attr_value.WhichOneof('value') == 'func':\n deps.add(attr_value.func.name)\n elif attr_value.WhichOneof('value') == 'list':\n for fn in attr_value.list.func:\n deps.add(fn.name)\n return deps", + "docstring": "Find functions referenced in .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py", + "ast_data": "FunctionDef name:_list_function_deps arg:fdef 
arg:library_function_names arg:library_gradient_names arguments arg arg arg Assign Call For Assign Call If Compare Call If BoolOp Compare Call For Call If Compare Call Call If Compare Call For Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_pg_config", + "source_code": "def _get_pg_config(group: Optional[ProcessGroup]=None) -> dict[str, Any]:\n pg = group or _get_default_group()\n return {'pg_name': _get_process_group_name(pg), 'pg_desc': pg.group_desc, 'backend_config': get_backend_config(pg), 'pg_size': _get_group_size(pg), 'ranks': get_process_group_ranks(pg)}", + "docstring": "Return the pg configuration of the given process group.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:_get_pg_config arg:group arguments arg Assign BoolOp Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_variant_handle_data", + "source_code": "def _variant_handle_data(t):\n handle_data = resource_variable_ops.get_eager_safe_handle_data(t)\n if not handle_data.is_set:\n return None\n return handle_data.shape_and_type", + "docstring": "Fetches handle data for a variant tensor , or None if unavailable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:_variant_handle_data arg:t arguments arg Assign Call If Return return:no Return return:yes" + }, + { + "library": "pytorch", + "name": "_is_conv_or_conv_transpose_node", + "source_code": "def _is_conv_or_conv_transpose_node(n: Node):\n return _is_conv_node(n) or _is_conv_transpose_node(n)", + "docstring": "Return whether the node refers to an aten conv or conv transpose op.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\utils.py", + "ast_data": "FunctionDef name:_is_conv_or_conv_transpose_node arg:n arguments arg Return return:yes BoolOp Call Call" + }, + { + "library": "django", + "name": "split_tzname_delta", + "source_code": "def split_tzname_delta(tzname):\n for sign in ['+', '-']:\n if sign in tzname:\n name, offset = tzname.rsplit(sign, 1)\n if offset and parse_time(offset):\n if ':' not in offset:\n offset = f'{offset}:00'\n return (name, sign, offset)\n return (tzname, None, None)", + "docstring": "Split a time zone name into a 3-tuple of (name, sign, offset).", + "type": "function", + "file_path": "django\\django\\db\\backends\\utils.py", + "ast_data": "FunctionDef name:split_tzname_delta arg:tzname arguments arg For If Compare Assign Call If BoolOp Call If Compare Assign Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "_justify", + "source_code": "def _justify(head: list[Sequence[str]], tail: list[Sequence[str]]) -> tuple[list[tuple[str, ...]], list[tuple[str, ...]]]:\n combined = head + tail\n max_length = [0] * len(combined[0])\n for inner_seq in combined:\n length = [len(item) for item in inner_seq]\n max_length = [max(x, y) for x, y in zip(max_length, length)]\n head_tuples = [tuple((x.rjust(max_len) for x, max_len in zip(seq, max_length))) for seq in head]\n tail_tuples = [tuple((x.rjust(max_len) for x, max_len in zip(seq, max_length))) for seq in tail]\n return (head_tuples, tail_tuples)", + "docstring": "Justify items in head and tail, so they are right-aligned when stacked. 
Parameters ---------- head : list-like of list-likes of strings tail : list-like of list-likes of strings Returns ------- tuple of list of tuples of strings Same as head and tail, but items are right aligned when stacked vertically. Examples -------- >>> _justify([[\"a\", \"b\"]], [[\"abc\", \"abcd\"]]) ([(' a', ' b')], [('abc', 'abcd')])", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\printing.py", + "ast_data": "FunctionDef name:_justify arg:head arg:tail arguments arg arg Assign Assign Call For Assign Call Assign Call Call Assign Call Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n return self._name", + "docstring": "The name of the underlying accumulator.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_int_overflow", + "source_code": "def _int_overflow(x, exception, msg=None):\n if x > iinfo(dfitpack_int).max:\n if msg is None:\n msg = f'{x!r} cannot fit into an {dfitpack_int!r}'\n raise exception(msg)\n return dfitpack_int.type(x)", + "docstring": "Cast the value to an dfitpack_int and raise an OverflowError if the value cannot fit.", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_fitpack_impl.py", + "ast_data": "FunctionDef name:_int_overflow arg:x arg:exception arg:msg arguments arg arg arg If Compare Call If Compare Assign Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_set_converter_options_for_calibration", + "source_code": "def _set_converter_options_for_calibration(self, converter: TFLiteConverter) -> TFLiteConverter:\n if not converter.optimizations:\n raise ValueError('converter object must set optimizations to lite.Optimize.DEFAULT')\n if not converter.representative_dataset:\n raise ValueError('converter object must set representative_dataset')\n converter.experimental_mlir_quantizer = True\n converter._experimental_calibrate_only = True\n return converter", + "docstring": "Verify converter options and set required experimental options.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py", + "ast_data": "FunctionDef name:_set_converter_options_for_calibration arg:self arg:converter arguments arg arg If Raise Call If Raise Call Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "combine_two_partitions", + "source_code": "def combine_two_partitions(partition_0: Partition, partition_1: Partition, partitions: list[Partition]) -> None:\n partition = Partition(len(partitions))\n partition.nodes = partition_0.nodes.union(partition_1.nodes)\n partition.recalculate_mem_size()\n partitions.append(partition)\n partitions.remove(partition_0)\n partitions.remove(partition_1)\n reorganize_partitions(partitions)\n return", + "docstring": "Given a list of partitions and its two partitions, combine these two partitions into a new one appending to the partitions and remove the previous two partitions from the list of partitions", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py", + "ast_data": "FunctionDef name:combine_two_partitions arg:partition_0 arg:partition_1 arg:partitions arguments arg arg arg Assign Call Call Assign Call Call Call Call Call Call Return return:no" + }, + { + "library": "pandas", + "name": 
"_unpack_zerodim_and_defer", + "source_code": "def _unpack_zerodim_and_defer(method: F, name: str) -> F:\n stripped_name = name.removeprefix('__').removesuffix('__')\n is_cmp = stripped_name in {'eq', 'ne', 'lt', 'le', 'gt', 'ge'}\n\n @wraps(method)\n def new_method(self, other):\n if is_cmp and isinstance(self, ABCIndex) and isinstance(other, ABCSeries):\n pass\n else:\n prio = getattr(other, '__pandas_priority__', None)\n if prio is not None:\n if prio > self.__pandas_priority__:\n return NotImplemented\n other = item_from_zerodim(other)\n return method(self, other)\n return new_method", + "docstring": "Boilerplate for pandas conventions in arithmetic and comparison methods. Ensure method returns NotImplemented when operating against \"senior\" classes. Ensure zero-dimensional ndarrays are always unpacked. Parameters ---------- method : binary method name : str Returns ------- method", + "type": "function", + "file_path": "pandas\\pandas\\core\\ops\\common.py", + "ast_data": "FunctionDef name:_unpack_zerodim_and_defer arg:method arg:name arguments arg arg Assign Call Call Assign Compare FunctionDef name:new_method arg:self arg:other arguments arg arg If BoolOp Call Call Assign Call If Compare If Compare Return return:yes Assign Call Return return:yes Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "custom_bwd", + "source_code": "@deprecated(\"`torch.cuda.amp.custom_bwd(args...)` is deprecated. Please use `torch.amp.custom_bwd(args..., device_type='cuda')` instead.\", category=FutureWarning)\ndef custom_bwd(bwd):\n return functools.partial(torch.amp.custom_bwd, device_type='cuda')(bwd)", + "docstring": "`` instead.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\amp\\autocast_mode.py", + "ast_data": "FunctionDef name:custom_bwd arg:bwd arguments arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "isWrappedScalarType", + "source_code": "def isWrappedScalarType(typ: Type) -> bool:\n if isinstance(typ, BaseType):\n return typ.name == BaseTy.Scalar\n elif isinstance(typ, (OptionalType, ListType)):\n return isWrappedScalarType(typ.elem)\n return False", + "docstring": "Given a type, determine if it is a c10::scalar which we will wrap in a lazy Value. Since we literally change the type from scalarT to valueT, information is lost. This function helps build a list of wrapped scalars to save that information", + "type": "function", + "file_path": "pytorch\\torchgen\\api\\lazy.py", + "ast_data": "FunctionDef name:isWrappedScalarType arg:typ arguments arg If Call Return return:yes Compare If Call Return return:yes Call Return return:yes" + }, + { + "library": "sphinx", + "name": "word_filter", + "source_code": "def word_filter(self, word: str) -> bool:\n return not word.isdigit() and word not in self.stopwords", + "docstring": "Return true if the target word should be registered in the search index. This method is called after stemming.", + "type": "method", + "file_path": "sphinx\\sphinx\\search\\__init__.py", + "ast_data": "FunctionDef name:word_filter arg:self arg:word arguments arg arg Return return:yes BoolOp Call Compare" + }, + { + "library": "pytorch", + "name": "__repr__", + "source_code": "def __repr__(self) -> str:\n return f\"\"", + "docstring": "A representation of the Version that shows all internal state. 
>>> Version('1.0.0')", + "type": "method", + "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_non_persistent_buffers", + "source_code": "def _get_non_persistent_buffers(mod: torch.nn.Module) -> set[str]:\n result: set[str] = set()\n for name, m in mod.named_modules(remove_duplicate=False):\n if name:\n result.update((f'{name}.{b}' for b in m._non_persistent_buffers_set))\n else:\n result.update(m._non_persistent_buffers_set)\n return result", + "docstring": "Returns set of non-persistent buffers in a module and its submodules.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_trace.py", + "ast_data": "FunctionDef name:_get_non_persistent_buffers arg:mod arguments arg Call For Call If Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_set_control_flow_context", + "source_code": "def _set_control_flow_context(self, ctx) -> None:\n self._control_flow_context = ctx", + "docstring": "Sets the current control flow context of this op. Args: ctx: a context object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_set_control_flow_context arg:self arg:ctx arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "SparseCategoricalAccuracy", + "source_code": "class SparseCategoricalAccuracy(MeanMetricWrapper):\n\n def __init__(self, name='sparse_categorical_accuracy', dtype=None):\n super(SparseCategoricalAccuracy, self).__init__(sparse_categorical_accuracy, name, dtype=dtype)", + "docstring": "Calculates how often predictions match integer labels. You can provide logits of classes as , since argmax of logits and probabilities are same. This metric creates two local variables, and that are used to compute the frequency with which matches . This frequency is ultimately returned as : an idempotent operation that simply divides by . If is , weights default to 1. Use of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.SparseCategoricalAccuracy() >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]]) >>> m.result().numpy() 0.5 >>> m.reset_state() >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]], ... sample_weight=[0.7, 0.3]) >>> m.result().numpy() 0.3 Usage with API:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "ClassDef name:SparseCategoricalAccuracy FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call" + }, + { + "library": "pandas", + "name": "construct_array_type", + "source_code": "@classmethod\ndef construct_array_type(cls) -> type_t[ExtensionArray]:\n raise AbstractMethodError(cls)", + "docstring": "Return the array type associated with this dtype. 
Returns ------- type", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\base.py", + "ast_data": "FunctionDef name:construct_array_type arg:cls arguments arg Raise Call" + }, + { + "library": "kornia", + "name": "ConvQuadInterp3d", + "source_code": "class ConvQuadInterp3d(Module):\n\n def __init__(self, strict_maxima_bonus: float=10.0, eps: float=1e-07) -> None:\n super().__init__()\n self.strict_maxima_bonus = strict_maxima_bonus\n self.eps = eps\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(strict_maxima_bonus={self.strict_maxima_bonus})'\n\n def forward(self, x: Tensor) -> tuple[Tensor, Tensor]:\n return conv_quad_interp3d(x, self.strict_maxima_bonus, self.eps)", + "docstring": "Calculate soft argmax 3d per window. See :func: for details.", + "type": "class", + "file_path": "kornia\\kornia\\geometry\\subpix\\spatial_soft_argmax.py", + "ast_data": "ClassDef name:ConvQuadInterp3d FunctionDef name:__init__ arg:self arg:strict_maxima_bonus arg:eps arguments arg arg arg Call Call Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "looper", + "source_code": "class looper:\n\n def __init__(self, seq):\n self.seq = seq\n\n def __iter__(self):\n return looper_iter(self.seq)\n\n def __repr__(self):\n return '<%s for %r>' % (self.__class__.__name__, self.seq)", + "docstring": "Helper for looping (particularly in templates) Use this like:: for loop, item in looper(seq): if loop.first: ...", + "type": "class", + "file_path": "numpy\\numpy\\_build_utils\\tempita\\_looper.py", + "ast_data": "ClassDef name:looper FunctionDef name:__init__ arg:self arg:seq arguments arg arg Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "datum", + "source_code": "@property\ndef datum(self):\n return self.srs['datum']", + "docstring": "Return the datum for this spatial reference.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py", + "ast_data": "FunctionDef name:datum arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "construct_strides", + "source_code": "def construct_strides(sizes: Sequence[int], fill_order: Sequence[int]) -> Sequence[int]:\n assert len(sizes) == len(fill_order), 'Length of sizes must match the length of the fill order'\n strides = [0] * len(sizes)\n current_stride = 1\n for dim in fill_order:\n strides[dim] = current_stride\n current_stride *= sizes[dim]\n return strides", + "docstring": "From a list of sizes and a fill order, construct the strides of the permuted tensor.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\kernel\\flex_attention.py", + "ast_data": "FunctionDef name:construct_strides arg:sizes arg:fill_order arguments arg arg Compare Call Call Assign Call Assign For Assign Return return:yes" + }, + { + "library": "scipy", + "name": "compute_R", + "source_code": "def compute_R(order, factor):\n I = np.arange(1, order + 1)[:, None]\n J = np.arange(1, order + 1)\n M = np.zeros((order + 1, order + 1))\n M[1:, 1:] = (I - 1 - factor * J) / I\n M[0] = 1\n return np.cumprod(M, axis=0)", + "docstring": "Compute the matrix for changing the differences array.", + "type": "function", + "file_path": "scipy\\scipy\\integrate\\_ivp\\bdf.py", + "ast_data": "FunctionDef 
name:compute_R arg:order arg:factor arguments arg arg Assign Call Assign Call Assign Call Assign Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_TrainingTarget", + "source_code": "class _TrainingTarget(object):\n\n def __init__(self, target, feedable=False, skip_target_weights=True):\n self._target = target\n self._feedable = feedable\n self._skip_target_weights = skip_target_weights\n\n @property\n def target(self):\n return self._target\n\n @property\n def feedable(self):\n return self._feedable\n\n @property\n def skip_target_weights(self):\n return self._skip_target_weights", + "docstring": "Container for a target tensor (y_true) and its metadata (shape, loss...). Args: target: A target tensor for the model. It may be if the output is excluded from loss computation. It is still kept as None since each output of the model should have a corresponding target. If the target is None, the rest of the attributes will be None as well. feedable: Boolean, whether the target is feedable (requires data to be passed in or ), or not (model compiled with argument). skip_target_weights: Boolean, whether the target should be skipped during weights calculation.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py", + "ast_data": "ClassDef name:_TrainingTarget FunctionDef name:__init__ arg:self arg:target arg:feedable arg:skip_target_weights arguments arg arg arg arg Assign Assign Assign FunctionDef name:target arg:self arguments arg Return return:yes FunctionDef name:feedable arg:self arguments arg Return return:yes FunctionDef name:skip_target_weights arg:self arguments arg Return return:yes" + }, + { + "library": "virtualenv", + "name": "seeder", + "source_code": "@property\ndef seeder(self):\n return self._seeder", + "docstring": "The mechanism used to provide the seed packages (pip, setuptools, wheel).", + "type": "method", + "file_path": "virtualenv\\src\\virtualenv\\run\\session.py", + "ast_data": "FunctionDef name:seeder arg:self arguments arg Return return:yes" + }, + { + "library": "authlib", + "name": "validate_jwks_uri", + "source_code": "def validate_jwks_uri(self):\n url = self.get('jwks_uri')\n if url and (not is_secure_transport(url)):\n raise ValueError('\"jwks_uri\" MUST use \"https\" scheme')", + "docstring": "OPTIONAL. URL of the authorization server's JWK Set [JWK] document. The referenced document contains the signing key(s) the client uses to validate signatures from the authorization server. This URL MUST use the \"https\" scheme. The JWK Set MAY also contain the server's encryption key or keys, which are used by clients to encrypt requests to the server. 
When both signing and encryption keys are made available, a \"use\" (public key use) parameter value is REQUIRED for all keys in the referenced JWK Set to indicate each key's intended usage.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py", + "ast_data": "FunctionDef name:validate_jwks_uri arg:self arguments arg Assign Call If BoolOp Call Raise Call" + }, + { + "library": "matplotlib", + "name": "AsinhTransform", + "source_code": "class AsinhTransform(Transform):\n input_dims = output_dims = 1\n\n def __init__(self, linear_width):\n super().__init__()\n if linear_width <= 0.0:\n raise ValueError(\"Scale parameter 'linear_width' \" + 'must be strictly positive')\n self.linear_width = linear_width\n\n def transform_non_affine(self, values):\n return self.linear_width * np.arcsinh(values / self.linear_width)\n\n def inverted(self):\n return InvertedAsinhTransform(self.linear_width)", + "docstring": "Inverse hyperbolic-sine transformation used by", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\scale.py", + "ast_data": "ClassDef name:AsinhTransform Assign FunctionDef name:__init__ arg:self arg:linear_width arguments arg arg Call Call If Compare Raise Call Assign FunctionDef name:transform_non_affine arg:self arg:values arguments arg arg Return return:yes Call FunctionDef name:inverted arg:self arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "rgb_to_ycbcr", + "source_code": "def rgb_to_ycbcr(image: Tensor) -> Tensor:\n if not isinstance(image, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n r: Tensor = image[..., 0, :, :]\n g: Tensor = image[..., 1, :, :]\n b: Tensor = image[..., 2, :, :]\n delta: float = 0.5\n y: Tensor = _rgb_to_y(r, g, b)\n cb: Tensor = (b - y) * 0.564 + delta\n cr: Tensor = (r - y) * 0.713 + delta\n return torch.stack([y, cb, cr], -3)", + "docstring": "Convert an RGB image to YCbCr. .. image:: _static/img/rgb_to_ycbcr.png Args: image: RGB Image to be converted to YCbCr with shape :math:. Returns: YCbCr version of the image with shape :math:. Examples: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_ycbcr(input) # 2x3x4x5", + "type": "function", + "file_path": "kornia\\kornia\\color\\ycbcr.py", + "ast_data": "FunctionDef name:rgb_to_ycbcr arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_generate_normals", + "source_code": "def _generate_normals(polygons):\n if isinstance(polygons, np.ndarray):\n n = polygons.shape[-2]\n i1, i2, i3 = (0, n // 3, 2 * n // 3)\n v1 = polygons[..., i1, :] - polygons[..., i2, :]\n v2 = polygons[..., i2, :] - polygons[..., i3, :]\n else:\n v1 = np.empty((len(polygons), 3))\n v2 = np.empty((len(polygons), 3))\n for poly_i, ps in enumerate(polygons):\n n = len(ps)\n ps = np.asarray(ps)\n i1, i2, i3 = (0, n // 3, 2 * n // 3)\n v1[poly_i, :] = ps[i1, :] - ps[i2, :]\n v2[poly_i, :] = ps[i2, :] - ps[i3, :]\n return np.cross(v1, v2)", + "docstring": "Compute the normals of a list of polygons, one normal per polygon. Normals point towards the viewer for a face with its vertices in counterclockwise order, following the right hand rule. Uses three points equally spaced around the polygon. This method assumes that the points are in a plane. 
Otherwise, more than one shade is required, which is not supported. Parameters ---------- polygons : list of (M_i, 3) array-like, or (..., M, 3) array-like A sequence of polygons to compute normals for, which can have varying numbers of vertices. If the polygons all have the same number of vertices and array is passed, then the operation will be vectorized. Returns ------- normals : (..., 3) array A normal vector estimated for the polygon.", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:_generate_normals arg:polygons arguments arg If Call Assign Assign Assign Assign Assign Call Call Assign Call Call For Call Assign Call Assign Call Assign Assign Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "has_mismatch", + "source_code": "def has_mismatch(self) -> bool:\n return self.mismatch_error is not None", + "docstring": "Return True if the subgraph has output mismatch between torch and ONNX.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\verification.py", + "ast_data": "FunctionDef name:has_mismatch arg:self arguments arg Return return:yes Compare" + }, + { + "library": "scipy", + "name": "_conditional_oddsratio", + "source_code": "def _conditional_oddsratio(table):\n x, M, n, N = _hypergeom_params_from_table(table)\n lo, hi = nchypergeom_fisher.support(M, n, N, 1)\n if x == lo:\n return 0\n if x == hi:\n return np.inf\n nc = _nc_hypergeom_mean_inverse(x, M, n, N)\n return nc", + "docstring": "Conditional MLE of the odds ratio for the 2x2 contingency table.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_odds_ratio.py", + "ast_data": "FunctionDef name:_conditional_oddsratio arg:table arguments arg Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "get_latest_release_doc", + "source_code": "def get_latest_release_doc(path):\n file_paths = os.listdir(path)\n file_paths.sort(key=lambda x: list(map(int, x.split('-')[0].split('.'))))\n return os.path.join(path, file_paths[-1])", + "docstring": "Method to pick the file from 'doc/release' with the highest release number (e.g., ).", + "type": "function", + "file_path": "scipy\\tools\\write_release_and_log.py", + "ast_data": "FunctionDef name:get_latest_release_doc arg:path arguments arg Assign Call Call arguments arg Call Call Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "precision", + "source_code": "@property\ndef precision(self):\n return capi.get_field_precision(self.ptr)", + "docstring": "Return the precision of this Field.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\field.py", + "ast_data": "FunctionDef name:precision arg:self arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "get_fieldstructure", + "source_code": "def get_fieldstructure(adtype, lastname=None, parents=None):\n if parents is None:\n parents = {}\n names = adtype.names\n for name in names:\n current = adtype[name]\n if current.names is not None:\n if lastname:\n parents[name] = [lastname]\n else:\n parents[name] = []\n parents.update(get_fieldstructure(current, name, parents))\n else:\n lastparent = list(parents.get(lastname, []) or [])\n if lastparent:\n lastparent.append(lastname)\n elif lastname:\n lastparent = [lastname]\n parents[name] = lastparent or []\n return parents", + "docstring": "Returns a dictionary with fields indexing lists of their parent fields. 
This function is used to simplify access to fields nested in other fields. Parameters ---------- adtype : np.dtype Input datatype lastname : optional Last processed field name (used internally during recursion). parents : dictionary Dictionary of parent fields (used internally during recursion). Examples -------- >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = np.dtype([('A', int), ... ('B', [('BA', int), ... ('BB', [('BBA', int), ('BBB', int)])])]) >>> rfn.get_fieldstructure(ndtype) ... # XXX: possible regression, order of BBA and BBB is swapped {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}", + "type": "function", + "file_path": "numpy\\numpy\\lib\\recfunctions.py", + "ast_data": "FunctionDef name:get_fieldstructure arg:adtype arg:lastname arg:parents arguments arg arg arg If Compare Assign Assign For Assign If Compare If Assign Assign Call Call Assign Call BoolOp Call If Call If Assign Assign BoolOp Return return:yes" + }, + { + "library": "kornia", + "name": "inverse", + "source_code": "@classmethod\ndef inverse(cls, input: Tensor, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> Tensor:\n if extra_args is None:\n extra_args = {}\n if isinstance(module, (K.GeometricAugmentationBase2D,)):\n if module.transform_matrix is None:\n raise ValueError(f'No valid transformation matrix found in {module.__class__}.')\n transform = module.compute_inverse_transformation(module.transform_matrix)\n input = module.inverse_masks(input, params=cls.get_instance_module_param(param), flags=module.flags, transform=transform, **extra_args)\n elif isinstance(module, (K.GeometricAugmentationBase3D,)):\n raise NotImplementedError('The support for 3d mask operations are not yet supported. You are welcome to file a PR in our repo.')\n elif isinstance(module, K.container.ImageSequentialBase):\n input = module.inverse_masks(input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n elif isinstance(module, (K.auto.operations.OperationBase,)):\n input = MaskSequentialOps.inverse(input, module=module.op, param=param, extra_args=extra_args)\n return input", + "docstring": "Inverse a transformation with respect to the parameters. Args: input: the input tensor. module: any torch Module but only kornia augmentation modules will count to apply transformations. param: the corresponding parameters to the module. extra_args: Optional dictionary of extra arguments with specific options for different input types.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\ops.py", + "ast_data": "FunctionDef name:inverse arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg If Compare Assign If Call If Compare Raise Call Assign Call Assign Call Call If Call Raise Call If Call Assign Call Call If Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "matrix", + "source_code": "def matrix(self) -> Tensor:\n rt = concatenate((self.r.matrix(), self.t.data[..., None]), -1)\n rt_3x3 = pad(rt, (0, 0, 0, 1))\n rt_3x3[..., -1, -1] = 1.0\n return rt_3x3", + "docstring": "Return the matrix representation of shape :math:. 
Example: >>> s = Se2(So2.identity(1), torch.ones(1, 2)) >>> s.matrix() tensor([[[1., -0., 1.], [0., 1., 1.], [0., 0., 1.]]], grad_fn=)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py", + "ast_data": "FunctionDef name:matrix arg:self arguments arg Assign Call Call Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_sort_zpos", + "source_code": "def set_sort_zpos(self, val):\n self._sort_zpos = val\n self.stale = True", + "docstring": "Set the position to use for z-sorting.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:set_sort_zpos arg:self arg:val arguments arg arg Assign Assign" + }, + { + "library": "sphinx", + "name": "SphinxError", + "source_code": "class SphinxError(Exception):\n category = 'Sphinx error'", + "docstring": "Base class for Sphinx errors. This is the base class for \"nice\" exceptions. When such an exception is raised, Sphinx will abort the build and present the exception category and message to the user. Extensions are encouraged to derive from this exception for their custom errors. Exceptions *not* derived from :exc: are treated as unexpected and shown to the user with a part of the traceback (and the full traceback saved in a temporary file). .. attribute:: category Description of the exception \"category\", used in converting the exception to a string (\"category: message\"). Should be set accordingly in subclasses.", + "type": "class", + "file_path": "sphinx\\sphinx\\errors.py", + "ast_data": "ClassDef name:SphinxError Assign" + }, + { + "library": "scikit-learn", + "name": "_partition_estimators", + "source_code": "def _partition_estimators(n_estimators, n_jobs):\n n_jobs = min(effective_n_jobs(n_jobs), n_estimators)\n n_estimators_per_job = np.full(n_jobs, n_estimators // n_jobs, dtype=int)\n n_estimators_per_job[:n_estimators % n_jobs] += 1\n starts = np.cumsum(n_estimators_per_job)\n return (n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist())", + "docstring": "Private function used to partition estimators between jobs.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py", + "ast_data": "FunctionDef name:_partition_estimators arg:n_estimators arg:n_jobs arguments arg arg Assign Call Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "compute_pool_size_without_padding", + "source_code": "def compute_pool_size_without_padding(ph, pw):\n stride_h = ops.constant(stride[0], torch.int32)\n stride_w = ops.constant(stride[1], torch.int32)\n pad_h = ops.constant(padding[0], torch.int32)\n pad_w = ops.constant(padding[1], torch.int32)\n kernel_h = ops.constant(kernel_size[0], torch.int32)\n kernel_w = ops.constant(kernel_size[1], torch.int32)\n hstart = ops.sub(ops.mul(ph, stride_h), pad_h)\n wstart = ops.sub(ops.mul(pw, stride_w), pad_w)\n hend = ops.minimum(ops.add(hstart, kernel_h), ops.add(ops.index_expr(height, torch.int32), pad_h))\n wend = ops.minimum(ops.add(wstart, kernel_w), ops.add(ops.index_expr(width, torch.int32), pad_w))\n hstart = ops.maximum(hstart, ops.constant(0, torch.int32))\n wstart = ops.maximum(wstart, ops.constant(0, torch.int32))\n hend = ops.minimum(hend, ops.index_expr(height, torch.int32))\n wend = ops.minimum(wend, ops.index_expr(width, torch.int32))\n divide_factor = ops.mul(ops.sub(hend, hstart), ops.sub(wend, wstart))\n return divide_factor", + "docstring": "This computes the scaling factor that we will 
divide an element by when", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\lowering.py", + "ast_data": "FunctionDef name:compute_pool_size_without_padding arg:ph arg:pw arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Call Call Call Assign Call Call Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "execute_sql", + "source_code": "def execute_sql(self, result_type):\n row_count = super().execute_sql(result_type)\n is_empty = row_count is None\n row_count = row_count or 0\n for query in self.query.get_related_updates():\n aux_row_count = query.get_compiler(self.using).execute_sql(result_type)\n if is_empty and aux_row_count:\n row_count = aux_row_count\n is_empty = False\n return row_count", + "docstring": "Execute the specified update. Return the number of rows affected by the primary update query. The \"primary update query\" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\compiler.py", + "ast_data": "FunctionDef name:execute_sql arg:self arg:result_type arguments arg arg Assign Call Call Assign Compare Assign BoolOp For Call Assign Call Call If BoolOp Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_default_layout", + "source_code": "@contextlib.contextmanager\ndef _default_layout(self, layout: layout_lib.Layout):\n previous_default = None\n previous_graph_size = None\n graph = None\n self._register_mesh(layout.mesh)\n try:\n previous_default = self._current_output_layout\n self._current_output_layout = layout.to_string().encode('utf-8')\n _pywrap_dtensor_device.ExperimentalSetDefaultLayout(self._device_info, self._current_output_layout)\n if context.executing_eagerly():\n with ops.device(self.name):\n yield\n else:\n graph = ops.get_default_graph()\n previous_graph_size = len(graph.get_operations())\n yield\n finally:\n if graph is not None:\n for operation in graph.get_operations()[previous_graph_size:]:\n operation._set_attr('_layout', attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(s=[self._current_output_layout])))\n operation._set_attr('_mesh', attr_value_pb2.AttrValue(s=layout.mesh.to_string().encode('utf-8')))\n self._current_output_layout = previous_default\n if self._current_output_layout is None:\n _pywrap_dtensor_device.ExperimentalClearDefaultLayout(self._device_info)\n else:\n _pywrap_dtensor_device.ExperimentalSetDefaultLayout(self._device_info, self._current_output_layout.decode('utf-8'))", + "docstring": "Sets a default output layout for all ops in the scope. Note: This is an internal helper method, which is not user facing api. Useful for requesting a specific layout for ops which would have no inferred layout, e.g. tf.zeros. Caveats: - Currently only affects the first output of an op. For Op with multiple outputs, this does not support yet. - All Ops in the scope will be attached with the same layout. This might not be valid as the rank is different. The current suggestion is: Try to wrap the raw op wheneven possible. Args: layout: A Layout for the outputs of all operations in this scope. 
Yields: Nothing.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py", + "ast_data": "FunctionDef name:_default_layout arg:self arg:layout arguments arg arg Assign Assign Assign Call Try Assign Assign Call Call Call If Call With Call Assign Call Assign Call Call If Compare For Call Call Call Call Call Call Call Call Assign If Compare Call Call Call" + }, + { + "library": "tensorflow", + "name": "clipnorm", + "source_code": "@property\ndef clipnorm(self):\n return self._clipnorm", + "docstring": "or . If set, clips gradients to a maximum norm.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", + "ast_data": "FunctionDef name:clipnorm arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "__init__", + "source_code": "def __init__(self, image_size: ImageSize, params: Tensor) -> None:\n super().__init__(AffineTransform(), OrthographicProjection(), image_size, params)\n if params.shape[-1] != 4 or len(params.shape) > 2:\n raise ValueError('params must be of shape B, 4 for ORTHOGRAPHIC Camera')", + "docstring": "Construct Orthographic class. Args: image_size: Image size params: Camera parameters of shape :math: of the form :math:.", + "type": "method", + "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:image_size arg:params arguments arg arg arg Call Call Call Call If BoolOp Compare Compare Call Raise Call" + }, + { + "library": "django", + "name": "check_password", + "source_code": "def check_password(password, encoded, setter=None, preferred='default'):\n is_correct, must_update = verify_password(password, encoded, preferred=preferred)\n if setter and is_correct and must_update:\n setter(password)\n return is_correct", + "docstring": "Return a boolean of whether the raw password matches the three part encoded digest. If setter is specified, it'll be called when you need to regenerate the password.", + "type": "function", + "file_path": "django\\django\\contrib\\auth\\hashers.py", + "ast_data": "FunctionDef name:check_password arg:password arg:encoded arg:setter arg:preferred arguments arg arg arg arg Assign Call If BoolOp Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_ops_in_metagraph", + "source_code": "def _get_ops_in_metagraph(meta_graph_def):\n return set(meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def))", + "docstring": "Returns a set of the ops in the MetaGraph. Returns the set of all the ops used in the MetaGraphDef indicated by the tag_set stored in SavedModel directory. Args: meta_graph_def: MetaGraphDef to list the ops of. Returns: A set of ops.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py", + "ast_data": "FunctionDef name:_get_ops_in_metagraph arg:meta_graph_def arguments arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "caching_module_getattr", + "source_code": "def caching_module_getattr(cls):\n assert cls.__name__ == '__getattr__'\n props = {name: prop for name, prop in vars(cls).items() if isinstance(prop, property)}\n instance = cls()\n\n @functools.cache\n def __getattr__(name):\n if name in props:\n return props[name].__get__(instance)\n raise AttributeError(f'module {cls.__module__!r} has no attribute {name!r}')\n return __getattr__", + "docstring": "Helper decorator for implementing module-level `` for deprecating module globals). 
The properties are all implicitly cached. Moreover, a suitable AttributeError is generated and raised if no property with the given name exists.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\_api\\__init__.py", + "ast_data": "FunctionDef name:caching_module_getattr arg:cls arguments arg Compare Assign Call Call Call Assign Call FunctionDef name:__getattr__ arg:name arguments arg If Compare Return return:yes Call Raise Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_getdefaults", + "source_code": "def _getdefaults(self, kw, ignore=frozenset()):\n defaults = self._cycler_items[self._idx]\n if any((kw.get(k, None) is None for k in {*defaults} - ignore)):\n self._idx = (self._idx + 1) % len(self._cycler_items)\n return {k: v for k, v in defaults.items() if k not in ignore}\n else:\n return {}", + "docstring": "If some keys in the property cycle (excluding those in the set *ignore*) are absent or set to None in the dict *kw*, return a copy of the next entry in the property cycle, excluding keys in *ignore*. Otherwise, don't advance the property cycle, and return an empty dict.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:_getdefaults arg:self arg:kw arg:ignore arguments arg arg arg Call Assign If Call Compare Call Assign Call Return return:yes Call Compare Return return:no" + }, + { + "library": "tensorflow", + "name": "_launch_cli", + "source_code": "def _launch_cli(self):\n self._register_this_run_info(self._run_cli)\n response = self._run_cli.run_ui(init_command=self._init_command, title=self._title, title_color=self._title_color)\n return response", + "docstring": "Launch the interactive command-line interface. Returns: The OnRunStartResponse specified by the user using the \"run\" command.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\local_cli_wrapper.py", + "ast_data": "FunctionDef name:_launch_cli arg:self arguments arg Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "DummyGensym", + "source_code": "class DummyGensym:\n\n def __init__(self):\n self._idx = 0\n\n def new_name(self, stem='tmp'):\n self._idx += 1\n return stem + '_' + str(1000 + self._idx)", + "docstring": "A dumb gensym that suffixes a stem by sequential numbers from 1000.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\common_transformers\\anf.py", + "ast_data": "ClassDef name:DummyGensym FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:new_name arg:self arg:stem arguments arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "validate", + "source_code": "def validate(self, other) -> None:\n if other is None:\n return\n if other.table_type != self.table_type:\n raise TypeError(f'incompatible table_type with existing [{other.table_type} - {self.table_type}]')\n for c in ['index_axes', 'non_index_axes', 'values_axes']:\n sv = getattr(self, c, None)\n ov = getattr(other, c, None)\n if sv != ov:\n for i, sax in enumerate(sv):\n oax = ov[i]\n if sax != oax:\n if c == 'values_axes' and sax.kind != oax.kind:\n raise ValueError(f'Cannot serialize the column [{oax.values[0]}] because its data contents are not [{sax.kind}] but [{oax.kind}] object dtype')\n raise ValueError(f'invalid combination of [{c}] on appending data [{sax}] vs current table [{oax}]')\n raise Exception(f'invalid combination of [{c}] on appending data [{sv}] vs current table [{ov}]')", + 
"docstring": "validate against an existing table", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:validate arg:self arg:other arguments arg arg If Compare Return return:no If Compare Raise Call For Assign Call Assign Call If Compare For Call Assign If Compare If BoolOp Compare Compare Raise Call Raise Call Raise Call" + }, + { + "library": "cherrypy", + "name": "release_lock", + "source_code": "def release_lock(self):\n self.locks[self.id].release()\n self.locked = False", + "docstring": "Release the lock on the currently-loaded session data.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:release_lock arg:self arguments arg Call Assign" + }, + { + "library": "pytorch", + "name": "immutable_list", + "source_code": "@compatibility(is_backward_compatible=True)\nclass immutable_list(list[_T]):\n __delitem__ = _no_mutation\n __iadd__ = _no_mutation\n __imul__ = _no_mutation\n __setitem__ = _no_mutation\n append = _no_mutation\n clear = _no_mutation\n extend = _no_mutation\n insert = _no_mutation\n pop = _no_mutation\n remove = _no_mutation\n reverse = _no_mutation\n sort = _no_mutation\n\n def __hash__(self) -> int:\n return hash(tuple(self))\n\n def __reduce__(self) -> tuple[type[Self], tuple[tuple[_T, ...]]]:\n return (type(self), (tuple(self),))", + "docstring": "An immutable version of :class:.", + "type": "class", + "file_path": "pytorch\\torch\\fx\\immutable_collections.py", + "ast_data": "ClassDef name:immutable_list Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call Call FunctionDef name:__reduce__ arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "subst_vars", + "source_code": "def subst_vars(target, source, d):\n var = re.compile('@([a-zA-Z_]+)@')\n with open(source, 'r') as fs:\n with open(target, 'w') as ft:\n for l in fs:\n m = var.search(l)\n if m:\n ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)]))\n else:\n ft.write(l)", + "docstring": "Substitute any occurrence of @foo@ by d['foo'] from source file into target.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\command\\build_src.py", + "ast_data": "FunctionDef name:subst_vars arg:target arg:source arg:d arguments arg arg arg Assign Call With Call With Call For Assign Call If Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "from_value", + "source_code": "@classmethod\ndef from_value(cls, value: Any) -> 'DynamicRaggedShape.Spec':\n initial = super(DynamicRaggedShape.Spec, cls).from_value(value)\n return DynamicRaggedShape.Spec(row_partitions=initial._row_partitions, static_inner_shape=initial._static_inner_shape, dtype=initial._inner_shape.dtype)", + "docstring": "Create a Spec from a DynamicRaggedShape.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:from_value arg:cls arg:value arguments arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "maybe_init_scope", + "source_code": "@tf_contextlib.contextmanager\ndef maybe_init_scope(layer):\n if ops.executing_eagerly_outside_functions() and getattr(layer, '_keras_style', True):\n with ops.init_scope():\n yield\n else:\n yield", + "docstring": "Open an if in V2 mode and using the keras graph. 
Args: layer: The Layer/Model that is currently active. Yields: None", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py", + "ast_data": "FunctionDef name:maybe_init_scope arg:layer arguments arg If BoolOp Call Call With Call" + }, + { + "library": "numpy", + "name": "argmin", + "source_code": "def argmin(self, axis=None, out=None):\n return N.ndarray.argmin(self, axis, out)._align(axis)", + "docstring": "Indexes of the minimum values along an axis. Return the indexes of the first occurrences of the minimum values along the specified axis. If axis is None, the index is for the flattened matrix. Parameters ---------- See for complete descriptions. See Also -------- numpy.argmin Notes ----- This is the same as , but returns a object where would return an . Examples -------- >>> x = -np.matrix(np.arange(12).reshape((3,4))); x matrix([[ 0, -1, -2, -3], [ -4, -5, -6, -7], [ -8, -9, -10, -11]]) >>> x.argmin() 11 >>> x.argmin(0) matrix([[2, 2, 2, 2]]) >>> x.argmin(1) matrix([[3], [3], [3]])", + "type": "method", + "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py", + "ast_data": "FunctionDef name:argmin arg:self arg:axis arg:out arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "cherrypy", + "name": "emit", + "source_code": "def emit(self, record):\n pass", + "docstring": "Emit a log record doing no-op.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cplogging.py", + "ast_data": "FunctionDef name:emit arg:self arg:record arguments arg arg" + }, + { + "library": "pytorch", + "name": "Worker", + "source_code": "class Worker:\n __slots__ = ['id', 'local_rank', 'global_rank', 'role_rank', 'world_size', 'role_world_size']\n\n def __init__(self, local_rank: int, global_rank: int=-1, role_rank: int=-1, world_size: int=-1, role_world_size: int=-1):\n self.id: Any = None\n self.local_rank: int = local_rank\n self.global_rank: int = global_rank\n self.role_rank: int = role_rank\n self.world_size: int = world_size\n self.role_world_size: int = role_world_size\n\n def __str__(self):\n return f'local_rank={self.local_rank},global_rank={self.global_rank},role_rank={self.role_rank},world_size={self.world_size},role_world_size={self.role_world_size}'\n\n def __repr__(self):\n return str(self)", + "docstring": "A worker instance. Contrast this with ``. 
Args: id (Any): uniquely identifies a worker (interpreted by the agent) local_rank (int): local rank of the worker global_rank (int): global rank of the worker role_rank (int): rank of the worker across all workers that have the same role world_size (int): number of workers (globally) role_world_size (int): number of workers that have the same role", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py", + "ast_data": "ClassDef name:Worker Assign FunctionDef name:__init__ arg:self arg:local_rank arg:global_rank arg:role_rank arg:world_size arg:role_world_size arguments arg arg arg arg arg arg FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_aggregate_hook", + "source_code": "def _aggregate_hook(self, name):\n feature_dim = self.data_groups[name]['feature_dim']\n features = self.data_groups[name]['features']\n agg_fn = self.data_groups[name]['aggregate_fn']\n\n def hook(module, input) -> None:\n input_data = input[0]\n data = self.data_groups[name].get('data')\n if features is None:\n if data is None:\n data = torch.zeros_like(input_data)\n self.state[name]['mask'] = torch.ones_like(input_data)\n out_data = agg_fn(data, input_data)\n else:\n if data is None:\n out_data = [0 for _ in range(0, len(features))]\n self.state[name]['mask'] = [0 for _ in range(0, len(features))]\n else:\n out_data = data\n for feature_idx in range(len(features)):\n feature_tensor = torch.Tensor([features[feature_idx]]).long().to(input_data.device)\n data_feature = torch.index_select(input_data, feature_dim, feature_tensor)\n if data is None:\n curr_data = torch.zeros_like(data_feature)\n self.state[name]['mask'][feature_idx] = torch.ones_like(data_feature)\n else:\n curr_data = data[feature_idx]\n out_data[feature_idx] = agg_fn(curr_data, data_feature)\n self.data_groups[name]['data'] = out_data\n return hook", + "docstring": "Returns hook that computes aggregate of activations passing through.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py", + "ast_data": "FunctionDef name:_aggregate_hook arg:self arg:name arguments arg arg Assign Assign Assign FunctionDef name:hook arg:module arg:input arguments arg arg Assign Assign Call If Compare If Compare Assign Call Assign Call Assign Call If Compare Assign Call Call Assign Call Call Assign For Call Call Assign Call Call Call Assign Call If Compare Assign Call Assign Call Assign Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "shape", + "source_code": "@property\ndef shape(self):\n return self._shape", + "docstring": "The specified by this type for the SparseTensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py", + "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes" + }, + { + "library": "sphinx", + "name": "copyfile", + "source_code": "def copyfile(source: str | os.PathLike[str], dest: str | os.PathLike[str], *, force: bool=False) -> None:\n source = Path(source)\n dest = Path(dest)\n if not source.exists():\n msg = f'{source} does not exist'\n raise FileNotFoundError(msg)\n if not (dest_exists := dest.exists()) or not filecmp.cmp(source, dest, shallow=False):\n if not force and dest_exists:\n from sphinx.util import logging\n logger = logging.getLogger(__name__)\n msg = __('Aborted attempted copy from %s 
to %s (the destination path has existing data).')\n logger.warning(msg, source, dest, type='misc', subtype='copy_overwrite')\n return\n if sys.platform == 'win32':\n shutil.copy2(source, dest)\n else:\n shutil.copyfile(source, dest)\n with contextlib.suppress(OSError):\n _copy_times(source, dest)", + "docstring": "Copy a file and its modification times, if possible. :param source: An existing source to copy. :param dest: The destination path. :param bool force: Overwrite the destination file even if it exists. :raise FileNotFoundError: The *source* does not exist. .. note:: :func: is a no-op if *source* and *dest* are identical.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\osutil.py", + "ast_data": "FunctionDef name:copyfile arg:source arg:dest arguments arg arg arg Assign Call Assign Call If Call Assign Raise Call If BoolOp Call Call If BoolOp Assign Call Assign Call Call Return return:no If Compare Call Call With Call Call" + }, + { + "library": "scikit-learn", + "name": "_check_input", + "source_code": "def _check_input(self, X, in_fit, check_positive=False, check_shape=False):\n X = validate_data(self, X, ensure_2d=True, dtype=FLOAT_DTYPES, force_writeable=True, copy=self.copy, ensure_all_finite='allow-nan', reset=in_fit)\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', 'All-NaN (slice|axis) encountered')\n if check_positive and self.method == 'box-cox' and (np.nanmin(X) <= 0):\n raise ValueError('The Box-Cox transformation can only be applied to strictly positive data')\n if check_shape and (not X.shape[1] == len(self.lambdas_)):\n raise ValueError('Input data has a different number of features than fitting data. Should have {n}, data has {m}'.format(n=len(self.lambdas_), m=X.shape[1]))\n return X", + "docstring": "Validate the input before fit and transform. Parameters ---------- X : array-like of shape (n_samples, n_features) in_fit : bool Whether or not is called from or other methods, e.g. , , etc. check_positive : bool, default=False If True, check that all data is positive and non-zero (only if ``). check_shape : bool, default=False If True, check that n_features matches the length of self.lambdas_", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:_check_input arg:self arg:X arg:in_fit arg:check_positive arg:check_shape arguments arg arg arg arg arg Assign Call With Call Call If BoolOp Compare Compare Call Raise Call If BoolOp Compare Call Raise Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "_boolrelextrema", + "source_code": "def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):\n if int(order) != order or order < 1:\n raise ValueError('Order must be an int >= 1')\n datalen = data.shape[axis]\n locs = np.arange(0, datalen)\n results = np.ones(data.shape, dtype=bool)\n main = data.take(locs, axis=axis, mode=mode)\n for shift in range(1, order + 1):\n plus = data.take(locs + shift, axis=axis, mode=mode)\n minus = data.take(locs - shift, axis=axis, mode=mode)\n results &= comparator(main, plus)\n results &= comparator(main, minus)\n if ~results.any():\n return results\n return results", + "docstring": "Calculate the relative extrema of . Relative extrema are calculated by finding locations where `datadata` that is True at an extrema, False otherwise. 
See also -------- argrelmax, argrelmin Examples -------- >>> import numpy as np >>> from scipy.signal._peak_finding import _boolrelextrema >>> testdata = np.array([1,2,3,2,1]) >>> _boolrelextrema(testdata, np.greater, axis=0) array([False, False, True, False, False], dtype=bool)", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_peak_finding.py", + "ast_data": "FunctionDef name:_boolrelextrema arg:data arg:comparator arg:axis arg:order arg:mode arguments arg arg arg arg arg If BoolOp Compare Call Compare Raise Call Assign Assign Call Assign Call Assign Call For Call Assign Call Assign Call Call Call If Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "gs_distill", + "source_code": "def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):\n if eps:\n paper_option = ['-dEPSCrop']\n elif ptype == 'figure':\n paper_option = [f'-dDEVICEWIDTHPOINTS={bbox[2]}', f'-dDEVICEHEIGHTPOINTS={bbox[3]}']\n else:\n paper_option = [f'-sPAPERSIZE={ptype}']\n psfile = tmpfile + '.ps'\n dpi = mpl.rcParams['ps.distiller.res']\n cbook._check_and_log_subprocess([mpl._get_executable_info('gs').executable, '-dBATCH', '-dNOPAUSE', '-r%d' % dpi, '-sDEVICE=ps2write', *paper_option, f'-sOutputFile={psfile}', tmpfile], _log)\n os.remove(tmpfile)\n shutil.move(psfile, tmpfile)\n if eps:\n pstoeps(tmpfile, bbox, rotated=rotated)", + "docstring": "Use ghostscript's pswrite or epswrite device to distill a file. This yields smaller files without illegal encapsulated postscript operators. The output is low-level, converting text to outlines.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py", + "ast_data": "FunctionDef name:gs_distill arg:tmpfile arg:eps arg:ptype arg:bbox arg:rotated arguments arg arg arg arg arg If Assign If Compare Assign Assign Assign Assign Call Call Call Call If Call" + }, + { + "library": "cherrypy", + "name": "Config", + "source_code": "class Config(reprconf.Config):\n\n def update(self, config):\n _if_filename_register_autoreload(config)\n super(Config, self).update(config)\n\n def _apply(self, config):\n if isinstance(config.get('global'), dict):\n if len(config) > 1:\n cherrypy.checker.global_config_contained_paths = True\n config = config['global']\n if 'tools.staticdir.dir' in config:\n config['tools.staticdir.section'] = 'global'\n super(Config, self)._apply(config)\n\n @staticmethod\n def __call__(**kwargs):\n\n def tool_decorator(f):\n _Vars(f).setdefault('_cp_config', {}).update(kwargs)\n return f\n return tool_decorator", + "docstring": "The 'global' configuration data for the entire CherryPy process.", + "type": "class", + "file_path": "cherrypy\\cherrypy\\_cpconfig.py", + "ast_data": "ClassDef name:Config FunctionDef name:update arg:self arg:config arguments arg arg Call Call Call FunctionDef name:_apply arg:self arg:config arguments arg arg If Call Call If Compare Call Assign Assign If Compare Assign Call Call FunctionDef name:__call__ arguments arg FunctionDef name:tool_decorator arg:f arguments arg Call Call Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "get_sort_key", + "source_code": "def get_sort_key(x: IterationRangesEntry) -> tuple[int, bool]:\n divisor_hint = V.graph.sizevars.size_hint(x.divisor, fallback=config.unbacked_symint_fallback)\n length_is_one_hint = V.graph.sizevars.size_hint(x.length, fallback=config.unbacked_symint_fallback) == 1\n return (divisor_hint, not length_is_one_hint)", + "docstring": "Gets the key for 
sorting nodes. When two nodes have the same divisor, the node with length as 1 should be handled first so the current divisor is not changed after multiplied node.length. Returns for ascending sort.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py", + "ast_data": "FunctionDef name:get_sort_key arg:x arguments arg Assign Call Assign Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_name_list", + "source_code": "def _name_list(tensor_list):\n return [compat.as_bytes(t.name) for t in tensor_list]", + "docstring": "Utility function for transitioning to the new session API. Args: tensor_list: a list of s. Returns: A list of each s name (as byte arrays).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", + "ast_data": "FunctionDef name:_name_list arg:tensor_list arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "input_reshard", + "source_code": "def input_reshard(module: torch.nn.Module, tp_device_mesh: DeviceMesh, input_reshard_dim: Optional[int]=None) -> torch.nn.Module:\n if input_reshard_dim is None:\n return module\n cx: Optional[torch.autograd.graph.saved_tensors_hooks] = None\n\n def input_reshard_forward_pre_hook(_: torch.nn.Module, _i: tuple[Any, ...]) -> None:\n saved_tensor_hooks = torch.autograd.graph.saved_tensors_hooks(partial(_pack_hook_tp, tp_device_mesh, input_reshard_dim), partial(_unpack_hook_tp, tp_device_mesh, input_reshard_dim))\n saved_tensor_hooks.__enter__()\n nonlocal cx\n cx = saved_tensor_hooks\n\n def input_reshard_backward_hook(_: torch.nn.Module, _i: tuple[Any, ...], _o: Any) -> Any:\n nonlocal cx\n cx.__exit__()\n module.register_forward_pre_hook(input_reshard_forward_pre_hook)\n module.register_forward_hook(input_reshard_backward_hook)\n return module", + "docstring": "Register hooks to an nn.Module for input resharding, enabling sharding and restoration during backward computation. Register hooks to an nn.Module with input resharding so that we can shard per the given and and restore the input back when recomputing the activations in the backward. The reason why we can do this is that for Tensor Parallel(TP), the input are same across all TP ranks. Args: module (:class:): Module to be registered with input resharding. tp_device_mesh (:class:): Object which describes the mesh topology of devices for Tensor Parallel. input_reshard_dim (Optional[int]): The dimension of where we perform the sharding of input. If set None, there is no sharding of input. 
Default: None Return: A :class: object registered with TP input resharding.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\input_reshard.py", + "ast_data": "FunctionDef name:input_reshard arg:module arg:tp_device_mesh arg:input_reshard_dim arguments arg arg arg If Compare Return return:yes FunctionDef name:input_reshard_forward_pre_hook arg:_ arg:_i arguments arg arg Assign Call Call Call Call Assign FunctionDef name:input_reshard_backward_hook arg:_ arg:_i arg:_o arguments arg arg arg Call Call Call Return return:yes" + }, + { + "library": "pygame", + "name": "read", + "source_code": "def read():\n with open(PATH) as setup_in:\n return setup_in.read()", + "docstring": "Return the contents of the Windows Common Setup as a string", + "type": "function", + "file_path": "pygame\\buildconfig\\setup_win_common.py", + "ast_data": "FunctionDef name:read arguments With Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "get_feature_names_out", + "source_code": "def get_feature_names_out(self, input_features=None):\n transformer_with_feature_names_out = []\n for name, trans, _ in self._iter():\n if not hasattr(trans, 'get_feature_names_out'):\n raise AttributeError('Transformer %s (type %s) does not provide get_feature_names_out.' % (str(name), type(trans).__name__))\n feature_names_out = trans.get_feature_names_out(input_features)\n transformer_with_feature_names_out.append((name, feature_names_out))\n return self._add_prefix_for_feature_names_out(transformer_with_feature_names_out)", + "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Assign For Call If Call Raise Call Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "cryptography", + "name": "encrypt", + "source_code": "@abc.abstractmethod\ndef encrypt(self, plaintext: bytes, padding: AsymmetricPadding) -> bytes:\n pass", + "docstring": "Encrypts the given plaintext.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py", + "ast_data": "FunctionDef name:encrypt arg:self arg:plaintext arg:padding arguments arg arg arg" + }, + { + "library": "django", + "name": "mail_managers", + "source_code": "def mail_managers(subject, message, fail_silently=False, connection=None, html_message=None):\n _send_server_message(setting_name='MANAGERS', subject=subject, message=message, html_message=html_message, fail_silently=fail_silently, connection=connection)", + "docstring": "Send a message to the managers, as defined by the MANAGERS setting.", + "type": "function", + "file_path": "django\\django\\core\\mail\\__init__.py", + "ast_data": "FunctionDef name:mail_managers arg:subject arg:message arg:fail_silently arg:connection arg:html_message arguments arg arg arg arg arg Call" + }, + { + "library": "pytorch", + "name": "is_tensor_evenly_shardable", + "source_code": "def is_tensor_evenly_shardable(shape: Sequence[int], spec: DTensorSpec) -> bool:\n shards_map = [1] * len(shape)\n for i, placement in enumerate(spec.placements):\n if placement.is_shard():\n shard_dim = cast(Shard, placement).dim\n shards_map[shard_dim] *= spec.mesh.size(i)\n for i, dim_size in 
enumerate(shape):\n if shards_map[i] > 1 and dim_size % shards_map[i] != 0:\n return False\n return True", + "docstring": "Check if the shape is evenly shardable according to the spec.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\utils.py", + "ast_data": "FunctionDef name:is_tensor_evenly_shardable arg:shape arg:spec arguments arg arg Assign Call For Call If Call Assign Call Call For Call If BoolOp Compare Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "categorical_hinge", + "source_code": "@dispatch.add_dispatch_support\ndef categorical_hinge(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)\n neg = math_ops.reduce_max((1.0 - y_true) * y_pred, axis=-1)\n zero = math_ops.cast(0.0, y_pred.dtype)\n return math_ops.maximum(neg - pos + 1.0, zero)", + "docstring": "Computes the categorical hinge loss between and . where Standalone usage: >>> y_true = np.random.randint(0, 3, size=(2,)) >>> y_true = tf.keras.utils.to_categorical(y_true, num_classes=3) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.categorical_hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> pos = np.sum(y_true * y_pred, axis=-1) >>> neg = np.amax((1. - y_true) * y_pred, axis=-1) >>> assert np.array_equal(loss.numpy(), np.maximum(0., neg - pos + 1.)) Args: y_true: The ground truth values. values are expected to be either or (i.e. a one-hot-encoded tensor). y_pred: The predicted values. Returns: Categorical hinge loss values.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:categorical_hinge arg:y_true arg:y_pred arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "CosineSimilarity", + "source_code": "class CosineSimilarity(Module):\n __constants__ = ['dim', 'eps']\n dim: int\n eps: float\n\n def __init__(self, dim: int=1, eps: float=1e-08) -> None:\n super().__init__()\n self.dim = dim\n self.eps = eps\n\n def forward(self, x1: Tensor, x2: Tensor) -> Tensor:\n return F.cosine_similarity(x1, x2, self.dim, self.eps)", + "docstring": "Returns cosine similarity between :math: and :math:, computed along . .. math :: \\text{similarity} = \\dfrac{x_1 \\cdot x_2}{\\max(\\Vert x_1 \\Vert _2 \\cdot \\Vert x_2 \\Vert _2, \\epsilon)}. Args: dim (int, optional): Dimension where cosine similarity is computed. Default: 1 eps (float, optional): Small value to avoid division by zero. Default: 1e-8 Shape: - Input1: :math: where D is at position - Input2: :math:, same number of dimensions as x1, matching x1 size at dimension , and broadcastable with x1 at other dimensions. 
- Output: :math: Examples: >>> input1 = torch.randn(100, 128) >>> input2 = torch.randn(100, 128) >>> cos = nn.CosineSimilarity(dim=1, eps=1e-6) >>> output = cos(input1, input2)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\distance.py", + "ast_data": "ClassDef name:CosineSimilarity Assign FunctionDef name:__init__ arg:self arg:dim arg:eps arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:x1 arg:x2 arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "forward_min_event_ndims", + "source_code": "@property\ndef forward_min_event_ndims(self):\n return self._forward_min_event_ndims", + "docstring": "Returns the minimal number of dimensions bijector.forward operates on.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py", + "ast_data": "FunctionDef name:forward_min_event_ndims arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "Ipython", + "source_code": "@cli.cls_cmd('ipython')\nclass Ipython(Python):\n ctx = CONTEXT\n pythonpath = Python.pythonpath\n\n @classmethod\n def run(cls, pythonpath, **kwargs):\n cls._setup(pythonpath, **kwargs)\n import IPython\n IPython.embed(user_ns={})", + "docstring": ":wrench: Start IPython shell with PYTHONPATH set. Running is equivalent to: 1. Execute build command (skip by passing the global option). 2. Set the PYTHONPATH environment variable (query with ). 3. Run the interpreter.", + "type": "class", + "file_path": "scipy\\dev.py", + "ast_data": "ClassDef name:Ipython Assign Assign FunctionDef name:run arg:cls arg:pythonpath arguments arg arg arg Call Call Call" + }, + { + "library": "django", + "name": "rttopo_version", + "source_code": "def rttopo_version(self):\n return self._get_spatialite_func('rttopo_version()')", + "docstring": "Return the version of RTTOPO library used by SpatiaLite.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py", + "ast_data": "FunctionDef name:rttopo_version arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "steps_per_run", + "source_code": "@property\ndef steps_per_run(self):\n return self._extended.steps_per_run", + "docstring": "DEPRECATED: use .extended.steps_per_run instead.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", + "ast_data": "FunctionDef name:steps_per_run arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_skip_if_str_or_tuple", + "source_code": "def _skip_if_str_or_tuple(window):\n if isinstance(window, str) or isinstance(window, tuple) or callable(window):\n return None\n else:\n return window", + "docstring": "Handle being a str or a tuple or an array-like.", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_delegators.py", + "ast_data": "FunctionDef name:_skip_if_str_or_tuple arg:window arguments arg If BoolOp Call Call Call Return return:no Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_position", + "source_code": "def get_position(self):\n return (self._x, self._y)", + "docstring": "Return the (x, y) position of the text.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:get_position arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "_register_tool_class", + "source_code": "def _register_tool_class(canvas_cls, 
tool_cls=None):\n if tool_cls is None:\n return functools.partial(_register_tool_class, canvas_cls)\n _tool_registry.add((canvas_cls, tool_cls))\n return tool_cls", + "docstring": "Decorator registering *tool_cls* as a tool class for *canvas_cls*.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "FunctionDef name:_register_tool_class arg:canvas_cls arg:tool_cls arguments arg arg If Compare Return return:yes Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "ClusterMixin", + "source_code": "class ClusterMixin:\n _estimator_type = 'clusterer'\n\n def __sklearn_tags__(self):\n tags = super().__sklearn_tags__()\n tags.estimator_type = 'clusterer'\n if tags.transformer_tags is not None:\n tags.transformer_tags.preserves_dtype = []\n return tags\n\n def fit_predict(self, X, y=None, **kwargs):\n self.fit(X, **kwargs)\n return self.labels_", + "docstring": "Mixin class for all cluster estimators in scikit-learn. - set estimator type to through the tag; - method returning the cluster labels associated to each sample. Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator, ClusterMixin >>> class MyClusterer(ClusterMixin, BaseEstimator): ... def fit(self, X, y=None): ... self.labels_ = np.ones(shape=(len(X),), dtype=np.int64) ... return self >>> X = [[1, 2], [2, 3], [3, 4]] >>> MyClusterer().fit_predict(X) array([1, 1, 1])", + "type": "class", + "file_path": "scikit-learn\\sklearn\\base.py", + "ast_data": "ClassDef name:ClusterMixin Assign FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign If Compare Assign Return return:yes FunctionDef name:fit_predict arg:self arg:X arg:y arguments arg arg arg arg Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X, copy=None):\n copy = copy if copy is not None else self.copy\n X = validate_data(self, X, accept_sparse='csr', force_writeable=True, copy=copy, reset=False)\n return normalize(X, norm=self.norm, axis=1, copy=False)", + "docstring": "Scale each non zero row of X to unit norm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. copy : bool, default=None Copy the input X or not. 
Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arg:copy arguments arg arg arg Assign Compare Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_placeholder_stack_trace", + "source_code": "def get_placeholder_stack_trace(placeholder: PlaceholderInfo) -> Optional[str]:\n if placeholder.stack_trace:\n return placeholder.stack_trace\n for user in placeholder.users:\n if user.stack_trace:\n return user.stack_trace\n return None", + "docstring": "Gets the first non-empty stack trace of a placeholder or its users.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\cudagraph_utils.py", + "ast_data": "FunctionDef name:get_placeholder_stack_trace arg:placeholder arguments arg If Return return:yes For If Return return:yes Return return:no" + }, + { + "library": "tensorflow", + "name": "_get_and_write_registered_savers", + "source_code": "def _get_and_write_registered_savers(registered_trackables: Dict[str, List[_TrackableData]], object_graph_proto: trackable_object_graph_pb2.TrackableObjectGraph) -> Dict[str, Dict[str, base.Trackable]]:\n registered_savers = collections.defaultdict(dict)\n for saver_name, trackables in registered_trackables.items():\n for td in trackables:\n registered_savers[saver_name][td.object_name] = td.object_to_save\n object_proto = object_graph_proto.nodes[td.node_id]\n object_proto.registered_saver.name = saver_name\n object_proto.registered_saver.object_name = td.object_name\n return registered_savers", + "docstring": "Generates dictionary of registered savers and updates the proto.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util.py", + "ast_data": "FunctionDef name:_get_and_write_registered_savers arg:registered_trackables arg:object_graph_proto arguments arg arg Assign Call For Call For Assign Assign Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_3d_properties", + "source_code": "def set_3d_properties(self, path, zs=0, zdir='z', axlim_clip=False):\n Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir, axlim_clip=axlim_clip)\n self._code3d = path.codes", + "docstring": "Set the *z* position and direction of the path patch. Parameters ---------- path : zs : float The location along the *zdir* axis in 3D space to position the path patch. zdir : {'x', 'y', 'z', 3-tuple} Plane to plot path patch orthogonal to. Default: 'z'. See for a description of the values. axlim_clip : bool, default: False Whether to hide path patches with a point outside the axes view limits. .. 
versionadded:: 3.10", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:set_3d_properties arg:self arg:path arg:zs arg:zdir arg:axlim_clip arguments arg arg arg arg arg Call Assign" + }, + { + "library": "pytorch", + "name": "norm", + "source_code": "@_apply_docstring_templates\ndef norm(input: Union[Tensor, MaskedTensor], ord: Optional[float]=2.0, dim: DimOrDims=None, *, keepdim: Optional[bool]=False, dtype: Optional[DType]=None, mask: Optional[Tensor]=None) -> Tensor:\n if dtype is None:\n dtype = input.dtype\n mask_input = _combine_input_and_mask(norm, input, mask, ord)\n if mask_input.layout == torch.strided:\n dim_ = _canonical_dim(dim, input.ndim)\n return torch.linalg.vector_norm(mask_input, ord, dim_, bool(keepdim), dtype=dtype)\n else:\n raise ValueError(f'masked norm expects strided tensor (got {mask_input.layout} tensor)')", + "docstring": "{reduction_signature} {reduction_descr} The identity value of norm operation, which is used to start the reduction, is ``. {reduction_args} {reduction_example}", + "type": "function", + "file_path": "pytorch\\torch\\masked\\_ops.py", + "ast_data": "FunctionDef name:norm arg:input arg:ord arg:dim arguments arg arg arg arg arg arg If Compare Assign Assign Call If Compare Assign Call Return return:yes Call Call Raise Call" + }, + { + "library": "pytorch", + "name": "zeros_", + "source_code": "def zeros_(tensor: Tensor) -> Tensor:\n return _no_grad_zero_(tensor)", + "docstring": "Fill the input Tensor with the scalar value . Args: tensor: an n-dimensional Examples: >>> w = torch.empty(3, 5) >>> nn.init.zeros_(w)", + "type": "function", + "file_path": "pytorch\\torch\\nn\\init.py", + "ast_data": "FunctionDef name:zeros_ arg:tensor arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "_validate_session_key", + "source_code": "def _validate_session_key(self, key):\n return key and len(key) >= 8", + "docstring": "Key must be truthy and at least 8 characters long. 
8 characters is an arbitrary lower bound for some minimal key security.", + "type": "method", + "file_path": "django\\django\\contrib\\sessions\\backends\\base.py", + "ast_data": "FunctionDef name:_validate_session_key arg:self arg:key arguments arg arg Return return:yes BoolOp Compare Call" + }, + { + "library": "pytorch", + "name": "override_getattribute_for_subclasses", + "source_code": "@contextmanager\ndef override_getattribute_for_subclasses(args):\n tensor_type_to_old_getattribute: dict[type[torch.Tensor], tuple[Callable, set[str]]] = {}\n for arg in args:\n subclass_types_to_instances: dict[type[torch.Tensor], list[type[torch.Tensor]]] = get_subclass_typing_container(arg)\n for subclass_type in subclass_types_to_instances:\n if subclass_type not in tensor_type_to_old_getattribute:\n assert len(subclass_types_to_instances[subclass_type]) > 0\n instance = subclass_types_to_instances[subclass_type][0]\n attrs_to_proxy = set(dir(instance)) - set(dir(torch.Tensor))\n tensor_type_to_old_getattribute[subclass_type] = (subclass_type.__getattribute__, attrs_to_proxy)\n try:\n for k, (old_getattr, attrs_to_proxy) in tensor_type_to_old_getattribute.items():\n custom = functools.partialmethod(custom_getattribute, original_getattr=old_getattr, attrs_to_proxy=attrs_to_proxy)\n k.__getattribute__ = custom\n yield\n finally:\n for k, (old_getattr, _) in tensor_type_to_old_getattribute.items():\n k.__getattribute__ = old_getattr", + "docstring": "Context manager that temporarily monkey patches tensor.__getattribute__ so that we can intercept it at torch_function layer.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_trace.py", + "ast_data": "FunctionDef name:override_getattribute_for_subclasses arg:args arguments arg For Call For If Compare Compare Call Assign Assign Call Call Call Call Assign Try For Call Assign Call Assign For Call Assign" + }, + { + "library": "tensorflow", + "name": "SymbolExposedTwiceError", + "source_code": "class SymbolExposedTwiceError(Exception):\n pass", + "docstring": "Raised when different symbols are exported with the same name.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py", + "ast_data": "ClassDef name:SymbolExposedTwiceError" + }, + { + "library": "kornia", + "name": "conv_nxn_bn", + "source_code": "def conv_nxn_bn(inp: int, oup: int, kernal_size: int=3, stride: int=1) -> Module:\n return nn.Sequential(nn.Conv2d(inp, oup, kernal_size, stride, 1, bias=False), nn.BatchNorm2d(oup), nn.SiLU())", + "docstring": "Apply NxN Convolution with Batch Norm.", + "type": "function", + "file_path": "kornia\\kornia\\contrib\\vit_mobile.py", + "ast_data": "FunctionDef name:conv_nxn_bn arg:inp arg:oup arg:kernal_size arg:stride arguments arg arg arg arg Return return:yes Call Call Call Call" + }, + { + "library": "pytorch", + "name": "swap_module", + "source_code": "def swap_module(mod: nn.Module, mapping: dict[type[nn.Module], type[nn.Module]]) -> nn.Module:\n if type_before_parametrizations(mod) in mapping:\n sparse_mod = mapping[type_before_parametrizations(mod)]\n new_mod = sparse_mod.from_dense(mod)\n for pre_hook_fn in mod._forward_pre_hooks.values():\n new_mod.register_forward_pre_hook(pre_hook_fn)\n for hook_fn in mod._forward_hooks.values():\n new_mod.register_forward_hook(hook_fn)\n devices = {p.device for p in chain(mod.parameters(), mod.buffers())}\n assert len(devices) <= 1, f'swap_module only works with cpu or single-device CUDA modules, but got devices {devices}'\n device = 
next(iter(devices)) if len(devices) > 0 else None\n if device:\n new_mod.to(device)\n return new_mod\n else:\n return mod", + "docstring": "Swaps the module using from_dense according to the mapping passed in. Args: mod: input module mapping: a dictionary that maps from nn module to sparse nn module Return: The corresponding sparse module of according to mapping, created using from_dense", + "type": "function", + "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\utils.py", + "ast_data": "FunctionDef name:swap_module arg:mod arg:mapping arguments arg arg If Compare Call Assign Call Assign Call For Call Call For Call Call Assign Call Call Call Compare Call Assign Compare Call Call Call If Call Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "set_params", + "source_code": "def set_params(self, **params):\n super()._set_params('estimators', **params)\n return self", + "docstring": "Set the parameters of an estimator from the ensemble. Valid parameter keys can be listed with . Note that you can directly set the parameters of the estimators contained in . Parameters ---------- **params : keyword arguments Specific parameters using e.g. . In addition, to setting the parameters of the estimator, the individual estimator of the estimators can also be set, or can be removed by setting them to 'drop'. Returns ------- self : object Estimator instance.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py", + "ast_data": "FunctionDef name:set_params arg:self arguments arg arg Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "step", + "source_code": "@abstractmethod\ndef step(self, *args, target=None, losses: Optional[list]=None, **kwargs):\n raise NotImplementedError", + "docstring": "Run one iteration of the pipeline schedule with *whole-batch* input. Will chunk the input into microbatches automatically, and go through the microbatches according to the schedule implementation. args: positional arguments to the model (as in non-pipeline case). kwargs: keyword arguments to the model (as in non-pipeline case). target: target for the loss function. 
losses: a list to store the losses for each microbatch.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py", + "ast_data": "FunctionDef name:step arg:self arguments arg arg arg arg arg Raise" + }, + { + "library": "matplotlib", + "name": "_move_from_center", + "source_code": "def _move_from_center(coord, centers, deltas, axmask=(True, True, True)):\n coord = np.asarray(coord)\n return coord + axmask * np.copysign(1, coord - centers) * deltas", + "docstring": "For each coordinate where *axmask* is True, move *coord* away from *centers* by *deltas*.", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axis3d.py", + "ast_data": "FunctionDef name:_move_from_center arg:coord arg:centers arg:deltas arg:axmask arguments arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "math_clamp", + "source_code": "def math_clamp(x, min_, max_):\n return min(max(x, min_), max_)", + "docstring": "Clamp a value to lie within [min, max].", + "type": "function", + "file_path": "kornia\\kornia\\feature\\lightglue.py", + "ast_data": "FunctionDef name:math_clamp arg:x arg:min_ arg:max_ arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "convert_to_int_tensor", + "source_code": "def convert_to_int_tensor(tensor, name, dtype=dtypes.int32):\n tensor = ops.convert_to_tensor(tensor, name=name, preferred_dtype=dtype or dtypes.int32)\n if tensor.dtype.is_integer:\n if dtype is not None:\n tensor = gen_math_ops.cast(tensor, dtype)\n else:\n raise TypeError(f'Argument `tensor` (name: {name}) must be of type integer. Received `tensor` = {tensor} of dtype: {tensor.dtype}')\n return tensor", + "docstring": "Converts the given value to an integer Tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:convert_to_int_tensor arg:tensor arg:name arg:dtype arguments arg arg arg Assign Call BoolOp If If Compare Assign Call Raise Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_compute_dof", + "source_code": "def _compute_dof(self, kind, dz=None):\n if kind == 'user':\n if dz is None:\n raise ValueError(\"For a CubicTriInterpolator with *kind*='user', a valid *dz* argument is expected.\")\n TE = _DOF_estimator_user(self, dz=dz)\n elif kind == 'geom':\n TE = _DOF_estimator_geom(self)\n else:\n TE = _DOF_estimator_min_E(self)\n return TE.compute_dof_from_df()", + "docstring": "Compute and return nodal dofs according to kind. Parameters ---------- kind : {'min_E', 'geom', 'user'} Choice of the _DOF_estimator subclass to estimate the gradient. dz : tuple of array-likes (dzdx, dzdy), optional Used only if *kind*=user; in this case passed to the :class:. 
Returns ------- array-like, shape (npts, 2) Estimation of the gradient at triangulation nodes (stored as degree of freedoms of reduced-HCT triangle elements).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py", + "ast_data": "FunctionDef name:_compute_dof arg:self arg:kind arg:dz arguments arg arg arg If Compare If Compare Raise Call Assign Call If Compare Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pygame", + "name": "_addfont", + "source_code": "def _addfont(name, bold, italic, font, fontdict):\n if name not in fontdict:\n fontdict[name] = {}\n fontdict[name][bold, italic] = font", + "docstring": "insert a font and style into the font dictionary", + "type": "function", + "file_path": "pygame\\src_py\\sysfont.py", + "ast_data": "FunctionDef name:_addfont arg:name arg:bold arg:italic arg:font arg:fontdict arguments arg arg arg arg arg If Compare Assign Assign" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self):\n self.extra = ExtraLinksPage()", + "docstring": "Mount extra links page into the links page app.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut04_complex_site.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, dump_root, debug_dump_rel_path):\n path_components = os.path.normpath(debug_dump_rel_path).split(os.sep)\n self._device_name = device_path_to_device_name(path_components[0])\n base = path_components[-1]\n if base.count('_') < 3:\n raise ValueError('Dump file path does not conform to the naming pattern: %s' % base)\n self._extended_timestamp = base.split('_')[-1]\n if '-' in self._extended_timestamp:\n self._timestamp = int(self._extended_timestamp[:self._extended_timestamp.find('-')])\n else:\n self._timestamp = int(self._extended_timestamp)\n self._debug_op = base.split('_')[-2]\n self._output_slot = int(base.split('_')[-3])\n node_base_name = '_'.join(base.split('_')[:-3])\n self._node_name = '/'.join(path_components[1:-1] + [node_base_name])\n self._file_path = os.path.join(dump_root, debug_dump_rel_path)\n self._dump_size_bytes = gfile.Stat(self._file_path).length if gfile.Exists(self._file_path) else None", + "docstring": "constructor. Args: dump_root: () Debug dump root directory. This path should not include the path component that represents the device name (see also below). debug_dump_rel_path: () Path to a debug dump file, relative to the . The first item of this relative path is assumed to be a path representing the name of the device that the Tensor belongs to. See for more details on the device path. For example, suppose the debug dump root directory is and the dump file is at , then the value of the debug_dump_rel_path should be . 
Raises: ValueError: If the base file name of the dump file does not conform to the dump file naming pattern: ___", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:dump_root arg:debug_dump_rel_path arguments arg arg arg Assign Call Call Assign Call Assign If Compare Call Raise Call Assign Call If Compare Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_sparse_true_positive_at_k", + "source_code": "def _sparse_true_positive_at_k(labels, predictions_idx, class_id=None, weights=None, name=None):\n with ops.name_scope(name, 'true_positives', (predictions_idx, labels, weights)):\n labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx, class_id)\n tp = sets.set_size(sets.set_intersection(predictions_idx, labels))\n tp = math_ops.cast(tp, dtypes.float64)\n if weights is not None:\n with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(weights, tp),)):\n weights = math_ops.cast(weights, dtypes.float64)\n tp = math_ops.multiply(tp, weights)\n return tp", + "docstring": "Calculates true positives for recall@k and precision@k. If is specified, calculate binary true positives for only. If is not specified, calculate metrics for predicted vs label classes, where is the 2nd dimension of . Args: labels: or with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and has shape [batch_size, num_labels]. [D1, ... DN] must match . predictions_idx: 1-D or higher with last dimension , top predicted classes. For rank , the first dimensions must match . class_id: Class for which we want binary metrics. weights: whose rank is either 0, or n-1, where n is the rank of . If the latter, it must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). name: Name of operation. Returns: A [D1, ... 
DN] of true positive counts.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", + "ast_data": "FunctionDef name:_sparse_true_positive_at_k arg:labels arg:predictions_idx arg:class_id arg:weights arg:name arguments arg arg arg arg arg With Call Assign Call Assign Call Call Assign Call If Compare With Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_compute_regularization", + "source_code": "def _compute_regularization(self, X):\n n_samples, n_features = X.shape\n alpha_W = self.alpha_W\n alpha_H = self.alpha_W if self.alpha_H == 'same' else self.alpha_H\n l1_reg_W = n_features * alpha_W * self.l1_ratio\n l1_reg_H = n_samples * alpha_H * self.l1_ratio\n l2_reg_W = n_features * alpha_W * (1.0 - self.l1_ratio)\n l2_reg_H = n_samples * alpha_H * (1.0 - self.l1_ratio)\n return (l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H)", + "docstring": "Compute scaled regularization terms.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py", + "ast_data": "FunctionDef name:_compute_regularization arg:self arg:X arguments arg arg Assign Assign Assign Compare Assign Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_MergeGrad", + "source_code": "@ops.RegisterGradient('Merge')\ndef _MergeGrad(op, grad, _):\n input_op = op.inputs[0].op\n graph = ops.get_default_graph()\n op_ctxt = control_flow_util.GetOutputContext(input_op)\n grad_ctxt = graph._get_control_flow_context()\n if isinstance(op_ctxt, WhileContext):\n return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot)\n elif isinstance(op_ctxt, CondContext):\n pred = op_ctxt.pred\n if grad_ctxt and grad_ctxt.grad_state:\n grad_state = grad_ctxt.grad_state\n real_pred = grad_state.history_map.get(pred.name)\n if real_pred is None:\n grad_ctxt = grad_state.grad_context\n grad_ctxt.Exit()\n history_pred = grad_state.AddForwardAccumulator(pred)\n grad_ctxt.Enter()\n real_pred = grad_state.AddBackpropAccumulatedValue(history_pred, pred)\n grad_state.history_map[pred.name] = real_pred\n pred = real_pred\n return control_flow_ops._SwitchRefOrTensor(grad, pred, name='cond_grad')\n else:\n num_inputs = len(op.inputs)\n cond = [math_ops.equal(op.outputs[1], i) for i in range(num_inputs)]\n return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1] for i in range(num_inputs)]", + "docstring": "Gradients for a Merge op are calculated using a Switch op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_grad.py", + "ast_data": "FunctionDef name:_MergeGrad arg:op arg:grad arg:_ arguments arg arg arg Assign Assign Call Assign Call Assign Call If Call Return return:yes Call If Call Assign If BoolOp Assign Assign Call If Compare Assign Call Assign Call Call Assign Call Assign Assign Return return:yes Call Assign Call Assign Call Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "icdf", + "source_code": "def icdf(self, value):\n value = self._monotonize_cdf(value)\n value = self.base_dist.icdf(value)\n for transform in self.transforms:\n value = transform(value)\n return value", + "docstring": "Computes the inverse cumulative distribution function using transform(s) and computing the score of the base distribution.", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\transformed_distribution.py", + "ast_data": "FunctionDef name:icdf arg:self arg:value arguments arg arg Assign Call Assign Call For Assign Call Return 
return:yes" + }, + { + "library": "pytorch", + "name": "post_compile", + "source_code": "def post_compile(self, compiled_fn, aot_config, *, runtime_metadata) -> Callable:\n return compiled_fn", + "docstring": "Given an output of the compiler, wrap it with information received from prologue. Args: compiled_fn: Callable after calling compiler_fn aot_config: AOTConfig after calling prologue runtime_metadata: ViewAndMutationMeta after calling all wrappers's pre_compile steps. Example: def wrapped_compiled_fn(args): # do something with args, aot_config, fw_metadata return compiled_fn(args) return wrapped_compiled_fn", + "type": "method", + "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\runtime_wrappers.py", + "ast_data": "FunctionDef name:post_compile arg:self arg:compiled_fn arg:aot_config arguments arg arg arg arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_cached_transform", + "source_code": "def _cached_transform(sub_pipeline, *, cache, param_name, param_value, transform_params):\n if param_name not in cache:\n if isinstance(param_value, tuple):\n cache[param_name] = tuple((sub_pipeline.transform(element, **transform_params) for element in param_value))\n else:\n cache[param_name] = sub_pipeline.transform(param_value, **transform_params)\n return cache[param_name]", + "docstring": "Transform a parameter value using a sub-pipeline and cache the result. Parameters ---------- sub_pipeline : Pipeline The sub-pipeline to be used for transformation. cache : dict The cache dictionary to store the transformed values. param_name : str The name of the parameter to be transformed. param_value : object The value of the parameter to be transformed. transform_params : dict The metadata to be used for transformation. This passed to the method of the sub-pipeline. Returns ------- transformed_value : object The transformed value of the parameter.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:_cached_transform arg:sub_pipeline arguments arg arg arg arg arg If Compare If Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_sort_zpos", + "source_code": "def set_sort_zpos(self, val):\n self._sort_zpos = val\n self.stale = True", + "docstring": "Set the position to use for z-sorting.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:set_sort_zpos arg:self arg:val arguments arg arg Assign Assign" + }, + { + "library": "scikit-learn", + "name": "split", + "source_code": "def split(self, X=None, y=None, groups=None):\n if groups is not None:\n warnings.warn(f'The groups parameter is ignored by {self.__class__.__name__}', UserWarning)\n return self._split()", + "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. 
test : ndarray The testing set indices for that split.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", + "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg If Compare Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "active_pool", + "source_code": "@staticmethod\ndef active_pool() -> Optional[_MemPool]:\n return _MemPoolContext.active_pool()", + "docstring": "Returns the active MemPool", + "type": "method", + "file_path": "pytorch\\torch\\cuda\\memory.py", + "ast_data": "FunctionDef name:active_pool arguments Return return:yes Call" + }, + { + "library": "scrapy", + "name": "Link", + "source_code": "class Link:\n __slots__ = ['fragment', 'nofollow', 'text', 'url']\n\n def __init__(self, url: str, text: str='', fragment: str='', nofollow: bool=False):\n if not isinstance(url, str):\n got = url.__class__.__name__\n raise TypeError(f'Link urls must be str objects, got {got}')\n self.url: str = url\n self.text: str = text\n self.fragment: str = fragment\n self.nofollow: bool = nofollow\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Link):\n raise NotImplementedError\n return self.url == other.url and self.text == other.text and (self.fragment == other.fragment) and (self.nofollow == other.nofollow)\n\n def __hash__(self) -> int:\n return hash(self.url) ^ hash(self.text) ^ hash(self.fragment) ^ hash(self.nofollow)\n\n def __repr__(self) -> str:\n return f'Link(url={self.url!r}, text={self.text!r}, fragment={self.fragment!r}, nofollow={self.nofollow!r})'", + "docstring": "Link objects represent an extracted link by the LinkExtractor. Using the anchor tag sample below to illustrate the parameters:: Dont follow this one :param url: the absolute url being linked to in the anchor tag. 
From the sample, this is `` attribute of the anchor tag.", + "type": "class", + "file_path": "scrapy\\scrapy\\link.py", + "ast_data": "ClassDef name:Link Assign FunctionDef name:__init__ arg:self arg:url arg:text arg:fragment arg:nofollow arguments arg arg arg arg arg If Call Assign Raise Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Raise Return return:yes BoolOp Compare Compare Compare Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call Call Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "_intersection", + "source_code": "def _intersection(self, other: Index, sort: bool=False) -> Index:\n other = cast('DatetimeTimedeltaMixin', other)\n if self._can_range_setop(other):\n return self._range_intersect(other, sort=sort)\n if not self._can_fast_intersect(other):\n result = Index._intersection(self, other, sort=sort)\n result = self._wrap_setop_result(other, result)\n return result._with_freq(None)._with_freq('infer')\n else:\n return self._fast_intersect(other, sort)", + "docstring": "intersection specialized to the case with matching dtypes and both non-empty.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\datetimelike.py", + "ast_data": "FunctionDef name:_intersection arg:self arg:other arg:sort arguments arg arg arg Assign Call If Call Return return:yes Call If Call Assign Call Assign Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "PerWorkerValues", + "source_code": "@tf_export('distribute.experimental.coordinator.PerWorkerValues', 'distribute.coordinator.PerWorkerValue', v1=[])\nclass PerWorkerValues(composite_tensor.CompositeTensor):\n\n def __init__(self, values):\n for v in values:\n if not isinstance(v, remote_value.RemoteValue):\n raise AssertionError('`PerWorkerValues` should only take `RemoteValue`s.')\n self._values = tuple(values)\n\n @property\n def _type_spec(self):\n return PerWorkerValuesTypeSpec(self._values[0]._type_spec, type(self))", + "docstring": "A container that holds a list of values, one value per worker. contains a collection of values, where each of the values is located on its corresponding worker, and upon being used as one of the or of , the value specific to a worker will be passed into the function being executed at that corresponding worker. Currently, the only supported path to create an object of is through calling on a -returned distributed dataset instance. 
The mechanism to create a custom is not yet supported.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py", + "ast_data": "ClassDef name:PerWorkerValues FunctionDef name:__init__ arg:self arg:values arguments arg arg For If Call Raise Call Assign Call FunctionDef name:_type_spec arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "_result_distribute_with_col_rearrange", + "source_code": "def _result_distribute_with_col_rearrange(results, input, world_size, weight, pg):\n sharding_dim = weight._sharding_spec.dim\n sharding_dim_size = weight.size(sharding_dim)\n dims = list(results[0].size())\n dims[0] = sharding_dim_size\n combined_results = torch.cat(results)\n output = torch.empty(*dims, device=combined_results.device, dtype=combined_results.dtype)\n split_size = get_split_size(sharding_dim_size, world_size)\n output_split_sizes = [0] * world_size\n for idx, placement in enumerate(weight._sharding_spec.placements):\n output_split_sizes[placement.rank()] = get_chunked_dim_size(sharding_dim_size, split_size, idx)\n output = all_to_all_single(output, combined_results, output_split_sizes=output_split_sizes, group=pg)\n rearrange_columns = any((idx != placement.rank() for idx, placement in enumerate(weight._sharding_spec.placements)))\n if not rearrange_columns:\n return output\n indices = []\n for placement in weight._sharding_spec.placements:\n dim_size = output_split_sizes[placement.rank()]\n start = sum((split_size if i < placement.rank() else 0 for i, split_size in enumerate(output_split_sizes)))\n indices += list(range(start, start + dim_size))\n return output.index_select(0, torch.tensor(indices, device=output.device))", + "docstring": "For col-wise sharding of weight, we need to distribute results to each rank. We do them in this function. Note that, if the index in the Sharding Spec is not equal to the rank number, we need to do the rearrangement based on the order given by the Sharding Spec (placement). Args: results: results from ops applied to inputs from all ranks. We need to distribute them back to their original ranks. input: tensor to be applied op to. world_size: number of ranks. weight: sharded weight tensor. pg: process group. 
Return: column rearranged result.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\chunk_sharding_spec_ops\\_common.py", + "ast_data": "FunctionDef name:_result_distribute_with_col_rearrange arg:results arg:input arg:world_size arg:weight arg:pg arguments arg arg arg arg arg Assign Assign Call Assign Call Call Assign Assign Call Assign Call Assign Call Assign For Call Assign Call Call Assign Call Assign Call Compare Call Call If Return return:yes Assign For Assign Call Assign Call Compare Call Call Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "mm_options", + "source_code": "def mm_options(config, sym_m, sym_n, sym_k, layout):\n even_k_symbolic = sympy.gcd(sym_k, config.kwargs['BLOCK_K']) == config.kwargs['BLOCK_K']\n allow_tf32 = torch.backends.cuda.matmul.allow_tf32 and (not inductor_config.force_same_precision or (sym_m % 16 == 0 and sym_n % 16 == 0 and (sym_k % 8 == 0)))\n options_dict = dict(EVEN_K=even_k_symbolic, ALLOW_TF32=allow_tf32, USE_FAST_ACCUM=False, ACC_TYPE=acc_type(layout.dtype), num_stages=config.num_stages, num_warps=config.num_warps, **config.kwargs)\n if 'GROUP_M' not in config.kwargs:\n group_m = config.kwargs.get('GROUP_M', 8)\n options_dict['GROUP_M'] = group_m\n return options_dict", + "docstring": "Common options to matmul triton templates.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\kernel\\mm_common.py", + "ast_data": "FunctionDef name:mm_options arg:config arg:sym_m arg:sym_n arg:sym_k arg:layout arguments arg arg arg arg arg Assign Compare Call Assign BoolOp BoolOp BoolOp Compare Compare Compare Assign Call Call If Compare Assign Call Assign Return return:yes" + }, + { + "library": "django", + "name": "get_distance", + "source_code": "def get_distance(self, f, value, lookup_type):\n if not value:\n return []\n value = value[0]\n if isinstance(value, Distance):\n if f.geodetic(self.connection):\n dist_param = value.m\n else:\n dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))\n else:\n dist_param = value\n if lookup_type == 'dwithin':\n dist_param = 'distance=%s' % dist_param\n return [dist_param]", + "docstring": "Return the distance parameters given the value and the lookup type. 
On Oracle, geometry columns with a geodetic coordinate system behave implicitly like a geography column, and thus meters will be used as the distance parameter on them.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\oracle\\operations.py", + "ast_data": "FunctionDef name:get_distance arg:self arg:f arg:value arg:lookup_type arguments arg arg arg arg If Return return:no Assign If Call If Call Assign Assign Call Call Call Assign If Compare Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_trim_zeros_complex", + "source_code": "def _trim_zeros_complex(str_complexes: ArrayLike, decimal: str='.') -> list[str]:\n real_part, imag_part = ([], [])\n for x in str_complexes:\n trimmed = re.split('(?{padded_length}}' + 'j' for real_pt, imag_pt in zip(padded_parts[:n], padded_parts[n:])]\n return padded", + "docstring": "Separates the real and imaginary parts from the complex number, and executes the _trim_zeros_float method on each of those.", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\format.py", + "ast_data": "FunctionDef name:_trim_zeros_complex arg:str_complexes arg:decimal arguments arg arg Assign For Assign Call Call Call Call Call Assign Call Assign Call If Compare Call Return return:no Assign Call Call Assign Call Return return:yes" + }, + { + "library": "sphinx", + "name": "multiply_length", + "source_code": "def multiply_length(length: str, scale: int) -> str:\n matched = re.match('^(\\\\d*\\\\.?\\\\d*)\\\\s*(\\\\S*)$', length)\n if not matched:\n return length\n if scale == 100:\n return length\n amount, unit = matched.groups()\n result = float(amount) * scale / 100\n return f'{int(result)}{unit}'", + "docstring": "Multiply *length* (width or height) by *scale*.", + "type": "function", + "file_path": "sphinx\\sphinx\\writers\\html5.py", + "ast_data": "FunctionDef name:multiply_length arg:length arg:scale arguments arg arg Assign Call If Return return:yes If Compare Return return:yes Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "subtract", + "source_code": "def subtract(a, b):\n return _maybe_static(a) - _maybe_static(b)", + "docstring": "A version of tf.subtract that eagerly evaluates if possible.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py", + "ast_data": "FunctionDef name:subtract arg:a arg:b arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "set_flags", + "source_code": "def set_flags(_enabled):\n orig_flags = (torch._C._get_nnpack_enabled(),)\n torch._C._set_nnpack_enabled(_enabled)\n return orig_flags", + "docstring": "Set if nnpack is enabled globally", + "type": "function", + "file_path": "pytorch\\torch\\backends\\nnpack\\__init__.py", + "ast_data": "FunctionDef name:set_flags arg:_enabled arguments arg Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "is_monotonic_decreasing", + "source_code": "@property\ndef is_monotonic_decreasing(self) -> Series:\n return self.apply(lambda ser: ser.is_monotonic_decreasing)", + "docstring": "Return whether each group's values are monotonically decreasing. Returns ------- Series See Also -------- SeriesGroupBy.is_monotonic_increasing : Return whether each group's values are monotonically increasing. 
Examples -------- >>> s = pd.Series([2, 1, 3, 4], index=[\"Falcon\", \"Falcon\", \"Parrot\", \"Parrot\"]) >>> s.groupby(level=0).is_monotonic_decreasing Falcon True Parrot False dtype: bool", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\generic.py", + "ast_data": "FunctionDef name:is_monotonic_decreasing arg:self arguments arg Return return:yes Call arguments arg" + }, + { + "library": "pandas", + "name": "read", + "source_code": "def read(self, where=None, columns=None, start: int | None=None, stop: int | None=None):\n raise NotImplementedError('WORMTable needs to implement read')", + "docstring": "read the indices and the indexing array, calculate offset rows and return", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:read arg:self arg:where arg:columns arg:start arg:stop arguments arg arg arg arg arg Raise Call" + }, + { + "library": "numpy", + "name": "clump_unmasked", + "source_code": "def clump_unmasked(a):\n mask = getattr(a, '_mask', nomask)\n if mask is nomask:\n return [slice(0, a.size)]\n return _ezclump(~mask)", + "docstring": "Return list of slices corresponding to the unmasked clumps of a 1-D array. (A \"clump\" is defined as a contiguous region of the array). Parameters ---------- a : ndarray A one-dimensional masked array. Returns ------- slices : list of slice The list of slices, one for each continuous region of unmasked elements in . See Also -------- flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges notmasked_contiguous, clump_masked Examples -------- >>> import numpy as np >>> a = np.ma.masked_array(np.arange(10)) >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked >>> np.ma.clump_unmasked(a) [slice(3, 6, None), slice(7, 8, None)]", + "type": "function", + "file_path": "numpy\\numpy\\ma\\extras.py", + "ast_data": "FunctionDef name:clump_unmasked arg:a arguments arg Assign Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pygame", + "name": "array_alpha", + "source_code": "def array_alpha(surface):\n size = surface.get_size()\n array = numpy.empty(size, numpy.uint8)\n surface_to_array(array, surface, 'A')\n return array", + "docstring": "pygame.surfarray.array_alpha(Surface): return array copy pixel alphas into a 2d array Copy the pixel alpha values (degree of transparency) from a Surface into a 2D array. This will work for any type of Surface format. Surfaces without a pixel alpha will return an array with all opaque values. This function will temporarily lock the Surface as pixels are copied (see the Surface.lock - lock the Surface memory for pixel access method).", + "type": "function", + "file_path": "pygame\\src_py\\surfarray.py", + "ast_data": "FunctionDef name:array_alpha arg:surface arguments arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "StreamContext", + "source_code": "class StreamContext(AbstractContextManager):\n cur_stream: Optional[Stream]\n\n def __init__(self, stream):\n self.stream = stream\n self.prev_stream = _default_cpu_stream\n\n def __enter__(self):\n cur_stream = self.stream\n if cur_stream is None:\n return\n global _current_stream\n self.prev_stream = _current_stream\n _current_stream = cur_stream\n\n def __exit__(self, type: Any, value: Any, traceback: Any) -> None:\n cur_stream = self.stream\n if cur_stream is None:\n return\n global _current_stream\n _current_stream = self.prev_stream", + "docstring": "Context-manager that selects a given stream. N.B. 
This class only exists to facilitate device-agnostic code", + "type": "class", + "file_path": "pytorch\\torch\\cpu\\__init__.py", + "ast_data": "ClassDef name:StreamContext FunctionDef name:__init__ arg:self arg:stream arguments arg arg Assign Assign FunctionDef name:__enter__ arg:self arguments arg Assign If Compare Return return:no Assign Assign FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg Assign If Compare Return return:no Assign" + }, + { + "library": "pytorch", + "name": "add_output_instructions", + "source_code": "def add_output_instructions(self, prefix: list[Instruction]) -> None:\n self.output_instructions.extend(prefix)\n self.should_exit = True", + "docstring": "We call this on the creation of a new compiled subgraph that is inserted before user code.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\output_graph.py", + "ast_data": "FunctionDef name:add_output_instructions arg:self arg:prefix arguments arg arg Call Assign" + }, + { + "library": "tensorflow", + "name": "_get_argspec_for_partial", + "source_code": "def _get_argspec_for_partial(obj):\n n_prune_args = len(obj.args)\n partial_keywords = obj.keywords or {}\n args, varargs, keywords, defaults = getargspec(obj.func)\n args = args[n_prune_args:]\n no_default = object()\n all_defaults = [no_default] * len(args)\n if defaults:\n all_defaults[-len(defaults):] = defaults\n for kw, default in partial_keywords.items():\n if kw in args:\n idx = args.index(kw)\n all_defaults[idx] = default\n elif not keywords:\n raise ValueError('Function does not have **kwargs parameter, but contains an unknown partial keyword.')\n first_default = next((idx for idx, x in enumerate(all_defaults) if x is not no_default), None)\n if first_default is None:\n return ArgSpec(args, varargs, keywords, None)\n invalid_default_values = [args[i] for i, j in enumerate(all_defaults) if j is no_default and i > first_default]\n if invalid_default_values:\n raise ValueError('Some arguments %s do not have default value, but they are positioned after those with default values. This can not be expressed with ArgSpec.' % invalid_default_values)\n return ArgSpec(args, varargs, keywords, tuple(all_defaults[first_default:]))", + "docstring": "Implements for objects. 
Args: obj: The object Returns: An Raises: ValueError: When callable's signature can not be expressed with ArgSpec.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py", + "ast_data": "FunctionDef name:_get_argspec_for_partial arg:obj arguments arg Assign Call Assign BoolOp Assign Call Assign Assign Call Assign Call If Assign Call For Call If Compare Assign Call Assign If Raise Call Assign Call Call Compare If Compare Return return:yes Call Assign Call BoolOp Compare Compare If Raise Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "toggled", + "source_code": "@property\ndef toggled(self):\n return self._toggled", + "docstring": "State of the toggled tool.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "FunctionDef name:toggled arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "SaveContext", + "source_code": "class SaveContext(threading.local):\n\n def __init__(self):\n super(SaveContext, self).__init__()\n self._in_save_context = False\n self._options = None\n\n def options(self):\n if not self.in_save_context():\n raise ValueError('Not in a SaveContext.')\n return self._options\n\n def enter_save_context(self, options):\n self._in_save_context = True\n self._options = options\n\n def exit_save_context(self):\n self._in_save_context = False\n self._options = None\n\n def in_save_context(self):\n return self._in_save_context", + "docstring": "A context for building a graph of SavedModel.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save_context.py", + "ast_data": "ClassDef name:SaveContext FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign FunctionDef name:options arg:self arguments arg If Call Raise Call Return return:yes FunctionDef name:enter_save_context arg:self arg:options arguments arg arg Assign Assign FunctionDef name:exit_save_context arg:self arguments arg Assign Assign FunctionDef name:in_save_context arg:self arguments arg Return return:yes" + }, + { + "library": "seaborn", + "name": "_native_width", + "source_code": "@property\ndef _native_width(self):\n if self.var_types[self.orient] == 'categorical':\n return 1\n unique_values = np.unique(self.comp_data[self.orient])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width", + "docstring": "Return unit of width separating categories on native numeric scale.", + "type": "method", + "file_path": "seaborn\\seaborn\\categorical.py", + "ast_data": "FunctionDef name:_native_width arg:self arguments arg If Compare Return return:yes Assign Call If Compare Call Assign Call Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "argrelmax", + "source_code": "def argrelmax(data, axis=0, order=1, mode='clip'):\n return argrelextrema(data, np.greater, axis, order, mode)", + "docstring": "Calculate the relative maxima of . Parameters ---------- data : ndarray Array in which to find the relative maxima. axis : int, optional Axis over which to select from . Default is 0. order : int, optional How many points on each side to use for the comparison to consider `numpy.takekdatadataargrelextremadatafind_peaks` can be used to detect all local maxima, including flat ones. .. 
versionadded:: 0.11.0 Examples -------- >>> import numpy as np >>> from scipy.signal import argrelmax >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) >>> argrelmax(x) (array([3, 6]),) >>> y = np.array([[1, 2, 1, 2], ... [2, 2, 0, 0], ... [5, 3, 4, 4]]) ... >>> argrelmax(y, axis=1) (array([0]), array([1]))", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_peak_finding.py", + "ast_data": "FunctionDef name:argrelmax arg:data arg:axis arg:order arg:mode arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "dst", + "source_code": "def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):\n return _pocketfft.dst(x, type, n, axis, norm, overwrite_x)", + "docstring": "Return the Discrete Sine Transform of arbitrary type sequence x. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DST (see Notes). Default type is 2. n : int, optional Length of the transform. If `xxn=-1n=Nn=-1/2n=N-1/2k=-1k=N-1n=-1n=N-1n=-0.5n=N-0.5`. The orthonormalized DST-IV is exactly its own inverse. .. versionadded:: 1.2.0 Support for DST-IV. References ---------- .. [1] Wikipedia, \"Discrete sine transform\",", + "type": "function", + "file_path": "scipy\\scipy\\fftpack\\_realtransforms.py", + "ast_data": "FunctionDef name:dst arg:x arg:type arg:n arg:axis arg:norm arg:overwrite_x arguments arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "fake_tensor_unsupported", + "source_code": "def fake_tensor_unsupported(fn):\n\n @functools.wraps(fn)\n def wrapper(model, inputs, **kwargs):\n with _disable_current_modes():\n inputs = list(map(defake, inputs))\n return fn(model, inputs, **kwargs)\n return wrapper", + "docstring": "Decorator for backends that need real inputs. We swap out fake tensors for zero tensors.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\backends\\common.py", + "ast_data": "FunctionDef name:fake_tensor_unsupported arg:fn arguments arg FunctionDef name:wrapper arg:model arg:inputs arguments arg arg arg With Call Assign Call Call Return return:yes Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_metadata_routing", + "source_code": "def get_metadata_routing(self):\n return MetadataRouter(owner=self.__class__.__name__).add(**self._scorers, method_mapping=MethodMapping().add(caller='score', callee='score'))", + "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.3 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py", + "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Return return:yes Call Call Call Call" + }, + { + "library": "scipy", + "name": "approx_jacobian", + "source_code": "def approx_jacobian(x, func, epsilon, *args):\n jac = approx_derivative(func, x, method='2-point', abs_step=epsilon, args=args)\n return np.atleast_2d(jac)", + "docstring": "Approximate the Jacobian matrix of a callable function. Parameters ---------- x : array_like The state vector at which to compute the Jacobian matrix. func : callable f(x,*args) The vector-valued function. epsilon : float The perturbation used to determine the partial derivatives. args : sequence Additional arguments passed to func. Returns ------- An array of dimensions `funcx`. 
Notes ----- The approximation is done using forward differences.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_slsqp_py.py", + "ast_data": "FunctionDef name:approx_jacobian arg:x arg:func arg:epsilon arguments arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "is_dask_namespace", + "source_code": "@lru_cache(100)\ndef is_dask_namespace(xp: Namespace) -> bool:\n return xp.__name__ in {'dask.array', _compat_module_name() + '.dask.array'}", + "docstring": "Returns True if is a Dask namespace. This includes both `` itself and the version wrapped by array-api-compat. See Also -------- array_namespace is_numpy_namespace is_cupy_namespace is_torch_namespace is_ndonnx_namespace is_jax_namespace is_pydata_sparse_namespace is_array_api_strict_namespace", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py", + "ast_data": "FunctionDef name:is_dask_namespace arg:xp arguments arg Return return:yes Compare Call Call" + }, + { + "library": "matplotlib", + "name": "get_center", + "source_code": "def get_center(self):\n return self.get_patch_transform().transform((0.5, 0.5))", + "docstring": "Return the centre of the rectangle.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:get_center arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "render_pep440_branch", + "source_code": "def render_pep440_branch(pieces):\n if pieces['closest-tag']:\n rendered = pieces['closest-tag']\n if pieces['distance'] or pieces['dirty']:\n if pieces['branch'] != 'master':\n rendered += '.dev0'\n rendered += plus_or_dot(pieces)\n rendered += f'{pieces['distance']}.g{pieces['short']}'\n if pieces['dirty']:\n rendered += '.dirty'\n else:\n rendered = '0'\n if pieces['branch'] != 'master':\n rendered += '.dev0'\n rendered += f'+untagged.{pieces['distance']}.g{pieces['short']}'\n if pieces['dirty']:\n rendered += '.dirty'\n return rendered", + "docstring": "TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The \".dev0\" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear \"older\" than the master branch). Exceptions: 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty]", + "type": "function", + "file_path": "pandas\\pandas\\_version.py", + "ast_data": "FunctionDef name:render_pep440_branch arg:pieces arguments arg If Assign If BoolOp If Compare Call If Assign If Compare If Return return:yes" + }, + { + "library": "tensorflow", + "name": "_unblock_model_reconstruction", + "source_code": "def _unblock_model_reconstruction(self, layer_id, layer):\n for model_id, v in self.model_layer_dependencies.items():\n _, layers = v\n if layer_id not in layers:\n continue\n layers[layers.index(layer_id)] = layer\n if all((isinstance(x, base_layer.Layer) for x in layers)):\n self._models_to_reconstruct.append(model_id)", + "docstring": "Removes layer from blocking model reconstruction.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py", + "ast_data": "FunctionDef name:_unblock_model_reconstruction arg:self arg:layer_id arg:layer arguments arg arg arg For Call Assign If Compare Assign Call If Call Call Call" + }, + { + "library": "tensorflow", + "name": "children", + "source_code": "def children(self, obj, save_type=base.SaveType.CHECKPOINT, **kwargs):\n children = {}\n for name, ref in self.list_children(obj, **kwargs):\n children[name] = ref\n return children", + "docstring": "Returns all child trackables attached to obj. Args: obj: A object. save_type: A string, can be 'savedmodel' or 'checkpoint'. **kwargs: kwargs to use when retrieving the object's children. Returns: Dictionary of all children attached to the object with name to trackable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\graph_view.py", + "ast_data": "FunctionDef name:children arg:self arg:obj arg:save_type arguments arg arg arg arg Assign For Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_OneDeviceReplicaContext", + "source_code": "class _OneDeviceReplicaContext(distribute_lib.ReplicaContext):\n\n def __init__(self, strategy):\n distribute_lib.ReplicaContext.__init__(self, strategy, replica_id_in_sync_group=0)\n\n @property\n def devices(self):\n return self._strategy.extended.worker_devices", + "docstring": "ReplicaContext for OneDeviceStrategy.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py", + "ast_data": "ClassDef name:_OneDeviceReplicaContext FunctionDef name:__init__ arg:self arg:strategy arguments arg arg Call FunctionDef name:devices arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_create_copy_for_async_checkpoint", + "source_code": "def _create_copy_for_async_checkpoint(self, feature_config, optimizer, pipeline_execution_with_tensor_core):\n return TPUEmbedding(feature_config=feature_config, optimizer=optimizer, pipeline_execution_with_tensor_core=pipeline_execution_with_tensor_core)", + "docstring": "Create a TPUEmbedding copy for checkpoint/async_checkpoint_helper.py.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py", + "ast_data": "FunctionDef name:_create_copy_for_async_checkpoint arg:self arg:feature_config arg:optimizer arg:pipeline_execution_with_tensor_core arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "gradients", + "source_code": "@doc_controls.do_not_generate_docs\ndef gradients(loss, variables):\n return gradients_module.gradients(loss, variables, colocate_gradients_with_ops=True)", + "docstring": "Returns the gradients of w.r.t. 
. Args: loss: Scalar tensor to minimize. variables: List of variables. Returns: A gradients tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:gradients arg:loss arg:variables arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "ExtensionTypeMetaclass", + "source_code": "class ExtensionTypeMetaclass(abc.ABCMeta):\n\n def __init__(cls, name, bases, namespace):\n if not namespace.get('_tf_extension_type_do_not_transform_this_class', False):\n _check_field_annotations(cls)\n _add_extension_type_constructor(cls)\n _add_type_spec(cls)\n super(ExtensionTypeMetaclass, cls).__init__(name, bases, namespace)", + "docstring": "Metaclass for tf.ExtensionType types.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py", + "ast_data": "ClassDef name:ExtensionTypeMetaclass FunctionDef name:__init__ arg:cls arg:name arg:bases arg:namespace arguments arg arg arg arg If Call Call Call Call Call Call" + }, + { + "library": "scipy", + "name": "inv", + "source_code": "def inv(A):\n if not (issparse(A) or is_pydata_spmatrix(A)):\n raise TypeError('Input must be a sparse arrays')\n I = _ident_like(A)\n Ainv = spsolve(A, I)\n return Ainv", + "docstring": "Compute the inverse of a sparse arrays Parameters ---------- A : (M, M) sparse arrays square matrix to be inverted Returns ------- Ainv : (M, M) sparse arrays inverse of Notes ----- This computes the sparse inverse of . If the inverse of is expected to be non-sparse, it will likely be faster to convert to dense and use . Examples -------- >>> from scipy.sparse import csc_array >>> from scipy.sparse.linalg import inv >>> A = csc_array([[1., 0.], [1., 2.]]) >>> Ainv = inv(A) >>> Ainv >>> A.dot(Ainv) >>> A.dot(Ainv).toarray() array([[ 1., 0.], [ 0., 1.]]) .. versionadded:: 0.12.0", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py", + "ast_data": "FunctionDef name:inv arg:A arguments arg If BoolOp Call Call Raise Call Assign Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_pagecount", + "source_code": "def get_pagecount(self):\n return len(self._ensure_file().pageList)", + "docstring": "Return the current number of pages in the multipage pdf file.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", + "ast_data": "FunctionDef name:get_pagecount arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "partition", + "source_code": "def partition(self, sep):\n return asarray(partition(self, sep))", + "docstring": "Partition each element in around . See Also -------- partition", + "type": "method", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:partition arg:self arg:sep arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "randint64", + "source_code": "def randint64(self, seed: T, offset: T, low: T, high: T) -> T:\n raise NotImplementedError", + "docstring": "Computes inductor_prims.randint. 
offset has dtype int32.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", + "ast_data": "FunctionDef name:randint64 arg:self arg:seed arg:offset arg:low arg:high arguments arg arg arg arg arg Raise" + }, + { + "library": "scikit-learn", + "name": "inplace_swap_row_csc", + "source_code": "def inplace_swap_row_csc(X, m, n):\n for t in [m, n]:\n if isinstance(t, np.ndarray):\n raise TypeError('m and n should be valid integers')\n if m < 0:\n m += X.shape[0]\n if n < 0:\n n += X.shape[0]\n m_mask = X.indices == m\n X.indices[X.indices == n] = m\n X.indices[m_mask] = n", + "docstring": "Swap two rows of a CSC matrix in-place. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Matrix whose two rows are to be swapped. It should be of CSC format. m : int Index of the row of X to be swapped. n : int Index of the row of X to be swapped.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py", + "ast_data": "FunctionDef name:inplace_swap_row_csc arg:X arg:m arg:n arguments arg arg arg For If Call Raise Call If Compare If Compare Assign Compare Assign Compare Assign" + }, + { + "library": "pytorch", + "name": "can_realize_as_comm_buffer", + "source_code": "def can_realize_as_comm_buffer(x: ir.TensorBox, comm_buffer_type: ir.CommBufferType) -> bool:\n data = _get_data(x)\n if isinstance(data, ir.Loops):\n return True\n layout = data.get_output_spec()\n if isinstance(layout, ir.CommBufferLayout):\n return True\n if isinstance(layout, ir.FlexibleLayout) and (not is_symbolic(data.get_numel())):\n return True\n return False", + "docstring": "Check if an input can be realized as a comm buffer of the specified .", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\comm_lowering.py", + "ast_data": "FunctionDef name:can_realize_as_comm_buffer arg:x arg:comm_buffer_type arguments arg arg Assign Call If Call Return return:yes Assign Call If Call Return return:yes If BoolOp Call Call Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "_join_dimensions_cached", + "source_code": "@functools.lru_cache(256)\ndef _join_dimensions_cached(expr: Expr) -> Expr:\n assert isinstance(expr, sympy.Add)\n scale = sympy.Wild('scale', exclude=[0], integer=True)\n base = sympy.Wild('base', integer=True)\n divisor = sympy.Wild('divisor', integer=True)\n mod1 = sympy.Wild('modulus', integer=True)\n mod2 = sympy.Wild('modulus2', integer=True)\n for term1 in expr.args:\n m1 = term1.match(scale * ModularIndexing(base, divisor, mod1))\n if m1:\n for term2 in expr.args:\n m2 = term2.match(m1[scale] * m1[mod1] * ModularIndexing(m1[base], m1[divisor] * m1[mod1], mod2))\n if m2 and term1 != term2:\n expr = join_dimensions(expr - term1 - term2 + m1[scale] * ModularIndexing(m1[base], m1[divisor], m1[mod1] * m2[mod2]))\n return expr\n for term1 in expr.args:\n m1 = term1.match(scale * ModularIndexing(base, divisor, mod1))\n if m1:\n for term2 in expr.args:\n m2 = term2.match(m1[scale] * m1[mod1] * FloorDiv(m1[base], m1[divisor] * m1[mod1]))\n if m2 is not None:\n expr = join_dimensions(expr - term1 - term2 + m1[scale] * FloorDiv(m1[base], m1[divisor]))\n return expr\n return expr", + "docstring": "ModularIndexing(i0, 1, 32) + 32 * ModularIndexing(i0, 32, 4) becomes ModularIndexing(i0, 1, 128) ModularIndexing(i0, 1, 32) + 32 * FloorDiv(i0, 32) becomes i0 This type of pattern can come from view operations", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\sizevars.py", + "ast_data": "FunctionDef 
name:_join_dimensions_cached arg:expr arguments arg Call Assign Call Assign Call Assign Call Assign Call Assign Call For Assign Call Call If For Assign Call Call If BoolOp Compare Assign Call Call Return return:yes For Assign Call Call If For Assign Call Call If Compare Assign Call Call Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "StrictlyAboveLookup", + "source_code": "@BaseSpatialField.register_lookup\nclass StrictlyAboveLookup(GISLookup):\n lookup_name = 'strictly_above'", + "docstring": "The 'strictly_above' operator returns true if A's bounding box is strictly above B's bounding box.", + "type": "class", + "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py", + "ast_data": "ClassDef name:StrictlyAboveLookup Assign" + }, + { + "library": "matplotlib", + "name": "set_sort_zpos", + "source_code": "def set_sort_zpos(self, val):\n self._sort_zpos = val\n self.stale = True", + "docstring": "Set the position to use for z-sorting.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:set_sort_zpos arg:self arg:val arguments arg arg Assign Assign" + }, + { + "library": "tensorflow", + "name": "get_np_doc_form", + "source_code": "def get_np_doc_form():\n return _np_doc_form", + "docstring": "Gets the form of the original numpy docstrings. Returns: See for the list of valid values.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py", + "ast_data": "FunctionDef name:get_np_doc_form arguments Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_item", + "source_code": "def get_item(target, i, opts):\n assert isinstance(opts, GetItemOpts)\n if isinstance(target, tensor_array_ops.TensorArray):\n return _tf_tensorarray_get_item(target, i)\n elif tensor_util.is_tf_type(target):\n if target.dtype == dtypes.variant:\n return _tf_tensor_list_get_item(target, i, opts)\n elif target.dtype == dtypes.string and target.shape.ndims == 0:\n return _tf_tensor_string_get_item(target, i)\n else:\n return _tf_tensor_get_item(target, i)\n else:\n return _py_get_item(target, i)", + "docstring": "The slice read operator (i.e. __getitem__). Note: it is unspecified whether target will be mutated or not. In general, if target is mutable (like Python lists), it will be mutated. Args: target: An entity that supports getitem semantics. i: Index to read from. opts: A GetItemOpts object. Returns: The read element. 
Raises: ValueError: if target is not of a supported type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py", + "ast_data": "FunctionDef name:get_item arg:target arg:i arg:opts arguments arg arg arg Call If Call Return return:yes Call If Call If Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "__unicode__", + "source_code": "def __unicode__(self):\n return ntou(self.__str__())", + "docstring": "Render the HTTP header value as a string.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", + "ast_data": "FunctionDef name:__unicode__ arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_get_small_trainset", + "source_code": "def _get_small_trainset(self, X_binned_train, y_train, sample_weight_train, seed):\n subsample_size = 10000\n if X_binned_train.shape[0] > subsample_size:\n indices = np.arange(X_binned_train.shape[0])\n stratify = y_train if is_classifier(self) else None\n indices = resample(indices, n_samples=subsample_size, replace=False, random_state=seed, stratify=stratify)\n X_binned_small_train = X_binned_train[indices]\n y_small_train = y_train[indices]\n if sample_weight_train is not None:\n sample_weight_small_train = sample_weight_train[indices]\n else:\n sample_weight_small_train = None\n X_binned_small_train = np.ascontiguousarray(X_binned_small_train)\n return (X_binned_small_train, y_small_train, sample_weight_small_train, indices)\n else:\n return (X_binned_train, y_train, sample_weight_train, slice(None))", + "docstring": "Compute the indices of the subsample set and return this set. For efficiency, we need to subsample the training set to compute scores with scorers.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", + "ast_data": "FunctionDef name:_get_small_trainset arg:self arg:X_binned_train arg:y_train arg:sample_weight_train arg:seed arguments arg arg arg arg arg Assign If Compare Assign Call Assign Call Assign Call Assign Assign If Compare Assign Assign Assign Call Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "autograd_not_implemented_inner", + "source_code": "def autograd_not_implemented_inner(operator: OperatorBase, delayed_error: bool, *args: Any, **kwargs: Any) -> Any:\n with torch._C._AutoDispatchBelowAutograd():\n result = operator(*args, **kwargs)\n flat_operands = pytree.arg_tree_leaves(*args)\n if torch.is_grad_enabled() and any((f.requires_grad for f in flat_operands if isinstance(f, torch.Tensor))):\n if delayed_error:\n err_fn = torch._C._functions.DelayedError(f'Autograd not implemented for {str(operator)}', 1)\n\n def fake_requires_grad(tensor):\n if torch.is_floating_point(tensor) or torch.is_complex(tensor):\n tensor = tensor.detach()\n tensor.requires_grad = True\n return tensor\n return pytree.tree_map_only(torch.Tensor, lambda x: err_fn(fake_requires_grad(x)), result)\n else:\n raise RuntimeError(f'Autograd not implemented for {str(operator)}')\n return result", + "docstring": "If autograd is enabled and any of the arguments require grad this will either raise an error or return a DelayedError depending on the value of delayed. 
Args: operator: The Operator to call with the *args and **kwargs with op_name: The name of the Operator delayed_error: If True, return a DelayedError instead of raising an error args: The flattened operands to the Operator kwargs: The keyword arguments to the Operator Raises: RuntimeError: If autograd is enabled and any of the arguments to the Operator", + "type": "function", + "file_path": "pytorch\\torch\\_higher_order_ops\\utils.py", + "ast_data": "FunctionDef name:autograd_not_implemented_inner arg:operator arg:delayed_error arguments arg arg arg arg With Call Assign Call Assign Call If BoolOp Call Call Call If Assign Call Call FunctionDef name:fake_requires_grad arg:tensor arguments arg If BoolOp Call Call Assign Call Assign Return return:yes Return return:yes Call arguments arg Call Call Raise Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "_override_resolve_out_shapes", + "source_code": "def _override_resolve_out_shapes(self, func):\n if func.__doc__ is None:\n func.__doc__ = 'Resolve to output shapes based on relevant inputs.'\n func.__name__ = 'resolve_out_shapes'\n self._resolve_out_shapes = func", + "docstring": "Set method by decorating a function.", + "type": "method", + "file_path": "scipy\\scipy\\special\\_multiufuncs.py", + "ast_data": "FunctionDef name:_override_resolve_out_shapes arg:self arg:func arguments arg arg If Compare Assign Assign Assign" + }, + { + "library": "scipy", + "name": "_extend_mode_to_code", + "source_code": "def _extend_mode_to_code(mode, is_filter=False):\n if mode == 'nearest':\n return 0\n elif mode == 'wrap':\n return 1\n elif mode in ['reflect', 'grid-mirror']:\n return 2\n elif mode == 'mirror':\n return 3\n elif mode == 'constant':\n return 4\n elif mode == 'grid-wrap' and is_filter:\n return 1\n elif mode == 'grid-wrap':\n return 5\n elif mode == 'grid-constant' and is_filter:\n return 4\n elif mode == 'grid-constant':\n return 6\n else:\n raise RuntimeError('boundary mode not supported')", + "docstring": "Convert an extension mode to the corresponding integer code.", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_ni_support.py", + "ast_data": "FunctionDef name:_extend_mode_to_code arg:mode arg:is_filter arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If BoolOp Compare Return return:yes If Compare Return return:yes If BoolOp Compare Return return:yes If Compare Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "state_dict_type", + "source_code": "@staticmethod\n@contextlib.contextmanager\ndef state_dict_type(module: nn.Module, state_dict_type: StateDictType, state_dict_config: Optional[StateDictConfig]=None, optim_state_dict_config: Optional[OptimStateDictConfig]=None) -> Generator:\n prev_state_dict_settings = FullyShardedDataParallel.set_state_dict_type(module, state_dict_type, state_dict_config, optim_state_dict_config)\n yield\n FullyShardedDataParallel.set_state_dict_type(module, prev_state_dict_settings.state_dict_type, prev_state_dict_settings.state_dict_config, prev_state_dict_settings.optim_state_dict_config)", + "docstring": "Set the `set_state_dict_typeset_state_dict_type`.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py", + "ast_data": "FunctionDef name:state_dict_type arg:module arg:state_dict_type arg:state_dict_config arg:optim_state_dict_config arguments arg arg arg arg Assign Call Call" + }, + { + 
"library": "matplotlib", + "name": "set_radii", + "source_code": "def set_radii(self, r):\n if np.shape(r) == (2,):\n self.a, self.b = r\n elif np.shape(r) == ():\n self.a = self.b = float(r)\n else:\n raise ValueError(\"Parameter 'r' must be one or two floats.\")\n self._path = None\n self.stale = True", + "docstring": "Set the semi-major (*a*) and semi-minor radii (*b*) of the annulus. Parameters ---------- r : float or (float, float) The radius, or semi-axes: - If float: radius of the outer circle. - If two floats: semi-major and -minor axes of outer ellipse.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:set_radii arg:self arg:r arguments arg arg If Compare Call Assign If Compare Call Assign Call Raise Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "SpatialDropout1D", + "source_code": "class SpatialDropout1D(Dropout):\n\n def __init__(self, rate, **kwargs):\n super(SpatialDropout1D, self).__init__(rate, **kwargs)\n self.input_spec = InputSpec(ndim=3)\n\n def _get_noise_shape(self, inputs):\n input_shape = array_ops.shape(inputs)\n noise_shape = (input_shape[0], 1, input_shape[2])\n return noise_shape", + "docstring": "Spatial 1D version of Dropout. This version performs the same function as Dropout, however, it drops entire 1D feature maps instead of individual elements. If adjacent frames within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease. In this case, SpatialDropout1D will help promote independence between feature maps and should be used instead. Args: rate: Float between 0 and 1. Fraction of the input units to drop. Call arguments: inputs: A 3D tensor. training: Python boolean indicating whether the layer should behave in training mode (adding dropout) or in inference mode (doing nothing). Input shape: 3D tensor with shape: Output shape: Same as input. 
References: - [Efficient Object Localization Using Convolutional Networks](", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py", + "ast_data": "ClassDef name:SpatialDropout1D FunctionDef name:__init__ arg:self arg:rate arguments arg arg arg Call Call Assign Call FunctionDef name:_get_noise_shape arg:self arg:inputs arguments arg arg Assign Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_InfoPrinterAbstract", + "source_code": "class _InfoPrinterAbstract:\n\n def to_buffer(self, buf: WriteBuffer[str] | None=None) -> None:\n table_builder = self._create_table_builder()\n lines = table_builder.get_lines()\n if buf is None:\n buf = sys.stdout\n fmt.buffer_put_lines(buf, lines)\n\n @abstractmethod\n def _create_table_builder(self) -> _TableBuilderAbstract:\n pass", + "docstring": "Class for printing dataframe or series info.", + "type": "class", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "ClassDef name:_InfoPrinterAbstract FunctionDef name:to_buffer arg:self arg:buf arguments arg arg Assign Call Assign Call If Compare Assign Call FunctionDef name:_create_table_builder arg:self arguments arg" + }, + { + "library": "scipy", + "name": "_CythonSpecialMeta", + "source_code": "class _CythonSpecialMeta(type):\n\n def __new__(cls, cls_name, bases, dct):\n params = [(10, 100, 1000), ('python', 'numpy', 'cython')]\n param_names = ['N', 'api']\n\n def get_time_func(name, args):\n\n @with_attributes(params=[(name,), (args,)] + params, param_names=['name', 'argument'] + param_names)\n def func(self, name, args, N, api):\n if api == 'python':\n self.py_func(N, *args)\n elif api == 'numpy':\n self.np_func(*self.obj)\n else:\n self.cy_func(N, *args)\n func.__name__ = 'time_' + name\n return func\n for name in FUNC_ARGS.keys():\n func = get_time_func(name, FUNC_ARGS[name])\n dct[func.__name__] = func\n return type.__new__(cls, cls_name, bases, dct)", + "docstring": "Add time_* benchmarks corresponding to cython_special._bench_*_cy", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\cython_special.py", + "ast_data": "ClassDef name:_CythonSpecialMeta FunctionDef name:__new__ arg:cls arg:cls_name arg:bases arg:dct arguments arg arg arg arg Assign Assign FunctionDef name:get_time_func arg:name arg:args arguments arg arg FunctionDef name:func arg:self arg:name arg:args arg:N arg:api arguments arg arg arg arg arg If Compare Call If Compare Call Call Call Assign Return return:yes For Call Assign Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "PaddingConfigDimension", + "source_code": "class PaddingConfigDimension:\n __slots__ = ('edge_padding_low', 'edge_padding_high', 'interior_padding')\n edge_padding_low: int\n edge_padding_high: int\n interior_padding: int\n\n def __init__(self):\n self.edge_padding_low = 0\n self.edge_padding_high = 0\n self.interior_padding = 0", + "docstring": "Python representation of a xla.PaddingConfigDimension protobuf.", + "type": "class", + "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py", + "ast_data": "ClassDef name:PaddingConfigDimension Assign FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "_get_flat_param_to_fqn", + "source_code": "def _get_flat_param_to_fqn(model: torch.nn.Module) -> dict[FlatParameter, str]:\n\n def module_fn(module, prefix, tree_level, flat_param_to_fqn):\n for param_name, param in _named_parameters_with_duplicates(module, 
recurse=False):\n if not isinstance(param, FlatParameter):\n continue\n fqn = clean_tensor_name(prefix + param_name)\n flat_param_to_fqn[param] = fqn\n\n def return_fn(flat_param_to_fqn):\n return flat_param_to_fqn\n flat_param_to_fqn_ret: dict[FlatParameter, str] = {}\n return _apply_to_modules(model, module_fn, return_fn, [fqn for fqn, _ in _named_parameters_with_duplicates(model)], flat_param_to_fqn_ret)", + "docstring": "Constructs a mapping from `` s in the module.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py", + "ast_data": "FunctionDef name:_get_flat_param_to_fqn arg:model arguments arg FunctionDef name:module_fn arg:module arg:prefix arg:tree_level arg:flat_param_to_fqn arguments arg arg arg arg For Call If Call Assign Call Assign FunctionDef name:return_fn arg:flat_param_to_fqn arguments arg Return return:yes Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "role", + "source_code": "def role(self, name: str) -> RoleFunction | None:\n if name in self._role_cache:\n return self._role_cache[name]\n if name not in self.roles:\n return None\n fullname = f'{self.name}:{name}'\n\n def role_adapter(typ: str, rawtext: str, text: str, lineno: int, inliner: Inliner, options: dict[str, Any] | None=None, content: Sequence[str]=()) -> tuple[list[Node], list[nodes.system_message]]:\n return self.roles[name](fullname, rawtext, text, lineno, inliner, options or {}, content)\n self._role_cache[name] = role_adapter\n return role_adapter", + "docstring": "Return a role adapter function that always gives the registered role its full name ('domain:name') as the first argument.", + "type": "method", + "file_path": "sphinx\\sphinx\\domains\\__init__.py", + "ast_data": "FunctionDef name:role arg:self arg:name arguments arg arg If Compare Return return:yes If Compare Return return:no Assign FunctionDef name:role_adapter arg:typ arg:rawtext arg:text arg:lineno arg:inliner arg:options arg:content arguments arg arg arg arg arg arg arg Return return:yes Call BoolOp Assign Return return:yes" + }, + { + "library": "seaborn", + "name": "__init__", + "source_code": "def __init__(self, plotter, markers=None, dashes=None, order=None):\n super().__init__(plotter)\n data = plotter.plot_data.get('style', pd.Series(dtype=float))\n if data.notna().any():\n if variable_type(data) == 'datetime':\n data = list(data)\n levels = categorical_order(data, order)\n markers = self._map_attributes(markers, levels, unique_markers(len(levels)), 'markers')\n dashes = self._map_attributes(dashes, levels, unique_dashes(len(levels)), 'dashes')\n paths = {}\n filled_markers = []\n for k, m in markers.items():\n if not isinstance(m, mpl.markers.MarkerStyle):\n m = mpl.markers.MarkerStyle(m)\n paths[k] = m.get_path().transformed(m.get_transform())\n filled_markers.append(m.is_filled())\n if any(filled_markers) and (not all(filled_markers)):\n err = 'Filled and line art markers cannot be mixed'\n raise ValueError(err)\n lookup_table = {}\n for key in levels:\n lookup_table[key] = {}\n if markers:\n lookup_table[key]['marker'] = markers[key]\n lookup_table[key]['path'] = paths[key]\n if dashes:\n lookup_table[key]['dashes'] = dashes[key]\n self.levels = levels\n self.lookup_table = lookup_table", + "docstring": "Map the levels of the variable to distinct values. 
Parameters ---------- # TODO add generic parameters", + "type": "method", + "file_path": "seaborn\\seaborn\\_base.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:plotter arg:markers arg:dashes arg:order arguments arg arg arg arg arg Call Call Assign Call Call If Call Call If Compare Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Assign Assign For Call If Call Assign Call Assign Call Call Call Call Call If BoolOp Call Call Assign Raise Call Assign For Assign If Assign Assign If Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "adaptive_avg_pool2d", + "source_code": "def adaptive_avg_pool2d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(adaptive_avg_pool2d, (input,), input, output_size)\n _output_size = _list_with_default(output_size, input.size())\n return torch._C._nn.adaptive_avg_pool2d(input, _output_size)", + "docstring": "Apply a 2D adaptive average pooling over an input signal composed of several input planes. See :class: for details and output shape. Args: output_size: the target output size (single integer or double-integer tuple)", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:adaptive_avg_pool2d arg:input arg:output_size arguments arg arg If Call Return return:yes Call Assign Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "check_consistent_length", + "source_code": "def check_consistent_length(*arrays):\n lengths = [_num_samples(X) for X in arrays if X is not None]\n if len(set(lengths)) > 1:\n raise ValueError('Found input variables with inconsistent numbers of samples: %r' % [int(l) for l in lengths])", + "docstring": "Check that all arrays have consistent first dimensions. Checks whether all objects in arrays have the same shape or length. Parameters ---------- *arrays : list or tuple of input objects. Objects that will be checked for consistent length. Examples -------- >>> from sklearn.utils.validation import check_consistent_length >>> a = [1, 2, 3] >>> b = [2, 3, 4] >>> check_consistent_length(a, b)", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\validation.py", + "ast_data": "FunctionDef name:check_consistent_length arguments arg Assign Call Compare If Compare Call Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "_any_str_or_dim_in_dynamic_shapes", + "source_code": "def _any_str_or_dim_in_dynamic_shapes(dynamic_shapes: dict[str, Any] | tuple[Any, ...] 
| list[Any]) -> bool:\n flat_dynamic_shapes, _ = _flatten_dynamic_shapes_to_axes(dynamic_shapes)\n if any((not isinstance(axes, (dict, list, tuple)) and axes is not None for axes in flat_dynamic_shapes)):\n return False\n for axes in flat_dynamic_shapes:\n if isinstance(axes, dict):\n for dim in axes.values():\n if isinstance(dim, (str, Dim)):\n return True\n elif isinstance(axes, (list, tuple)):\n for dim in axes:\n if isinstance(dim, (str, Dim)):\n return True\n return False", + "docstring": "Check if there is any string or Dim in the dynamic_shapes.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_dynamic_shapes.py", + "ast_data": "FunctionDef name:_any_str_or_dim_in_dynamic_shapes arg:dynamic_shapes arguments arg Assign Call If Call BoolOp Call Compare Return return:yes For If Call For Call If Call Return return:yes If Call For If Call Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "cost_complexity_pruning_path", + "source_code": "def cost_complexity_pruning_path(self, X, y, sample_weight=None):\n est = clone(self).set_params(ccp_alpha=0.0)\n est.fit(X, y, sample_weight=sample_weight)\n return Bunch(**ccp_pruning_path(est.tree_))", + "docstring": "Compute the pruning path during Minimal Cost-Complexity Pruning. See :ref: for details on the pruning process. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Internally, it will be converted to `~sklearn.utils.Bunch`.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\tree\\_classes.py", + "ast_data": "FunctionDef name:cost_complexity_pruning_path arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_sequential_split_and_maybe_inline_subgraphs", + "source_code": "def _sequential_split_and_maybe_inline_subgraphs(gm: torch.fx.GraphModule, graph_signature: Optional[ExportGraphSignature]) -> tuple[torch.fx.GraphModule, Optional[ExportGraphSignature]]:\n need_replacing = any((_is_set_grad_enabled_node(node) for node in gm.graph.nodes))\n if not need_replacing:\n return (gm, graph_signature)\n new_gm = sequential_split(gm, _is_set_grad_enabled_node)\n\n def _maybe_inline_or_replace_with_hop(node: torch.fx.Node):\n if _is_set_grad_enabled_sub_mod(node, omit_if_same_with_ambient=True):\n _replace_with_hop(node)\n else:\n _remove_set_grad_and_inline(node)\n return _sequential_split_and_maybe_inline_subgraphs_helper(new_gm, graph_signature, _maybe_inline_or_replace_with_hop)", + "docstring": "Helper function for replace_set_grad_with_hop_pass(). Split the graph module into multiple subgraphs based on the set_grad_enabled nodes. 
For each subgraph, decides whether to construct a HOO subgraph, or inline the calls back into the parent graph module.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\passes\\replace_set_grad_with_hop_pass.py", + "ast_data": "FunctionDef name:_sequential_split_and_maybe_inline_subgraphs arg:gm arg:graph_signature arguments arg arg Assign Call Call If Return return:yes Assign Call FunctionDef name:_maybe_inline_or_replace_with_hop arg:node arguments arg If Call Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "add_select_related", + "source_code": "def add_select_related(self, fields):\n if isinstance(self.select_related, bool):\n field_dict = {}\n else:\n field_dict = self.select_related\n for field in fields:\n d = field_dict\n for part in field.split(LOOKUP_SEP):\n d = d.setdefault(part, {})\n self.select_related = field_dict", + "docstring": "Set up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True).", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\query.py", + "ast_data": "FunctionDef name:add_select_related arg:self arg:fields arguments arg arg If Call Assign Assign For Assign For Call Assign Call Assign" + }, + { + "library": "matplotlib", + "name": "_point_along_a_line", + "source_code": "def _point_along_a_line(x0, y0, x1, y1, d):\n dx, dy = (x0 - x1, y0 - y1)\n ff = d / (dx * dx + dy * dy) ** 0.5\n x2, y2 = (x0 - ff * dx, y0 - ff * dy)\n return (x2, y2)", + "docstring": "Return the point on the line connecting (*x0*, *y0*) -- (*x1*, *y1*) whose distance from (*x0*, *y0*) is *d*.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:_point_along_a_line arg:x0 arg:y0 arg:x1 arg:y1 arg:d arguments arg arg arg arg arg Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_clean_out_of_range", + "source_code": "def _clean_out_of_range(values):\n return array_ops.where_v2(math_ops.greater_equal(values, num_classes), -1 * array_ops.ones_like(values), values)", + "docstring": "Replaces by -1 any large out-of-range .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", + "ast_data": "FunctionDef name:_clean_out_of_range arg:values arguments arg Return return:yes Call Call Call" + }, + { + "library": "kornia", + "name": "CharbonnierLoss", + "source_code": "class CharbonnierLoss(Module):\n\n def __init__(self, reduction: str='none') -> None:\n super().__init__()\n self.reduction = reduction\n\n def forward(self, img1: Tensor, img2: Tensor) -> Tensor:\n return charbonnier_loss(img1=img1, img2=img2, reduction=self.reduction)", + "docstring": "Criterion that computes the Charbonnier [2] (aka. L1-L2 [3]) loss. According to [1], we compute the Charbonnier loss as follows: .. math:: \\text{WL}(x, y) = \\sqrt{(x - y)^{2} + 1} - 1 Where: - :math: is the prediction. - :math: is the target to be regressed to. Reference: [1] [2] [3] [4] .. note:: This implementation follows the formulation by Barron [1]. Other works utilize a slightly different implementation (see [4]). Args: reduction: Specifies the reduction to apply to the output: `(*)`. - img2: the target tensor with the same shape as img1. 
Example: >>> criterion = CharbonnierLoss(reduction=\"mean\") >>> img1 = torch.randn(2, 3, 32, 2107, requires_grad=True) >>> img2 = torch.randn(2, 3, 32, 2107) >>> output = criterion(img1, img2) >>> output.backward()", + "type": "class", + "file_path": "kornia\\kornia\\losses\\charbonnier.py", + "ast_data": "ClassDef name:CharbonnierLoss FunctionDef name:__init__ arg:self arg:reduction arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:img1 arg:img2 arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "to_tensor_spec", + "source_code": "def to_tensor_spec(input_spec, default_dtype=None):\n default_dtype = default_dtype or backend.floatx()\n if isinstance(input_spec, InputSpec):\n dtype = input_spec.dtype or default_dtype\n return tensor_spec.TensorSpec(to_tensor_shape(input_spec), dtype)\n return tensor_spec.TensorSpec(None, default_dtype)", + "docstring": "Converts a Keras InputSpec object to a TensorSpec.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\input_spec.py", + "ast_data": "FunctionDef name:to_tensor_spec arg:input_spec arg:default_dtype arguments arg arg Assign BoolOp Call If Call Assign BoolOp Return return:yes Call Call Return return:yes Call" + }, + { + "library": "pygame", + "name": "draw", + "source_code": "def draw(self, surface, bgsurf=None, special_flags=0):\n spritedict = self.spritedict\n surface_blit = surface.blit\n dirty = self.lostsprites\n self.lostsprites = []\n dirty_append = dirty.append\n init_rect = self._init_rect\n for spr in self.sprites():\n rec = spritedict[spr]\n newrect = surface_blit(spr.image, spr.rect, None, special_flags)\n if rec is init_rect:\n dirty_append(newrect)\n elif newrect.colliderect(rec):\n dirty_append(newrect.union(rec))\n else:\n dirty_append(newrect)\n dirty_append(rec)\n spritedict[spr] = newrect\n return dirty", + "docstring": "draw all sprites in the right order onto the passed surface LayeredUpdates.draw(surface, special_flags=0): return Rect_list", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:draw arg:self arg:surface arg:bgsurf arg:special_flags arguments arg arg arg arg Assign Assign Assign Assign Assign Assign For Call Assign Assign Call If Compare Call If Call Call Call Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, learning_rate, global_step, initial_gradient_squared_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0, use_locking=False, name='AdagradDA'):\n if initial_gradient_squared_accumulator_value <= 0.0:\n raise ValueError('initial_gradient_squared_accumulator_value must be positive: %s' % initial_gradient_squared_accumulator_value)\n super(AdagradDAOptimizer, self).__init__(use_locking, name)\n self._learning_rate = learning_rate\n self._initial_gradient_squared_accumulator_value = initial_gradient_squared_accumulator_value\n self._learning_rate_tensor = None\n self._l1_regularization_strength = l1_regularization_strength\n self._l2_regularization_strength = l2_regularization_strength\n self._global_step = global_step\n self._global_step_on_worker = None", + "docstring": "Construct a new AdagradDA optimizer. Args: learning_rate: A or a floating point value. The learning rate. global_step: A containing the current training step number. initial_gradient_squared_accumulator_value: A floating point value. 
Starting value for the accumulators, must be positive. l1_regularization_strength: A float value, must be greater than or equal to zero. l2_regularization_strength: A float value, must be greater than or equal to zero. use_locking: If use locks for update operations. name: Optional name prefix for the operations created when applying gradients. Defaults to \"AdagradDA\". Raises: ValueError: If the is invalid.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\adagrad_da.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:learning_rate arg:global_step arg:initial_gradient_squared_accumulator_value arg:l1_regularization_strength arg:l2_regularization_strength arg:use_locking arg:name arguments arg arg arg arg arg arg arg arg If Compare Raise Call Call Call Assign Assign Assign Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "_make_iterator", + "source_code": "def _make_iterator(self):\n if not self._worker:\n raise ValueError('Worker device must be specified when creating an owned iterator.')\n if _should_use_multi_device_iterator(self._options):\n self._create_owned_multi_device_iterator()\n else:\n with ops.device(self._worker):\n self._iterator = iter(self._dataset)", + "docstring": "Make appropriate iterator on the dataset.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", + "ast_data": "FunctionDef name:_make_iterator arg:self arguments arg If Raise Call If Call Call With Call Assign Call" + }, + { + "library": "tensorflow", + "name": "_merge_nrows", + "source_code": "def _merge_nrows(nrows, static_nrows, value, dtype, validate):\n static_value_nrows = tensor_shape.dimension_at_index(value.shape, 0)\n if isinstance(value, tensor.Tensor):\n value_nrows = array_ops.shape(value, out_type=dtype)[0]\n else:\n value_nrows = value.nrows()\n if nrows is None:\n nrows = value_nrows\n elif static_value_nrows.value is not None and static_nrows.value is not None:\n if not static_value_nrows.is_compatible_with(static_nrows):\n raise ValueError('fields have incompatible nrows')\n nrows = value_nrows\n elif validate:\n nrows = control_flow_ops.with_dependencies([check_ops.assert_equal(nrows, value_nrows, message='fields have incompatible nrows')], nrows)\n return (nrows, static_nrows._merge_with(static_value_nrows))", + "docstring": "Merges with . Checks that has the expected number of rows (), and returns . If is true, then add validation ops that check that the values match. Args: nrows: scalar integer Tensor. static_nrows: tf.Dimension: static value of nrows, if known. value: Tensor or RaggedTensor or StructuredTensor dtype: dtype for . validate: bool -- whether to add validation ops. Returns: A tuple .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py", + "ast_data": "FunctionDef name:_merge_nrows arg:nrows arg:static_nrows arg:value arg:dtype arg:validate arguments arg arg arg arg arg Assign Call If Call Assign Call Assign Call If Compare Assign If BoolOp Compare Compare If Call Raise Call Assign If Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "spence", + "source_code": "@tf_export('math.special.spence')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef spence(x, name=None):\n with ops.name_scope(name, 'spence', [x]):\n return gen_special_math_ops.spence(x)", + "docstring": "Computes Spence's integral of element-wise. 
Spence's integral is defined as the integral of from to , with the domain of definition all non-negative real numbers. >>> tf.math.special.spence([0.5, 1., 2., 3.]).numpy() array([ 0.58224034, 0. , -0.82246685, -1.4367464], dtype=float32) This implementation is based off of the Cephes math library. Args: x: A or . Must be one of the following types: , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.spence @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py", + "ast_data": "FunctionDef name:spence arg:x arg:name arguments arg arg With Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "initial_scale", + "source_code": "@property\ndef initial_scale(self):\n if isinstance(self._loss_scale, _DynamicLossScaleState):\n return self._loss_scale.initial_loss_scale\n else:\n return self._loss_scale", + "docstring": "The initial loss scale. If is False, this is the same number as , as the loss scale never changes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:initial_scale arg:self arguments arg If Call Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "old_bound_to_new", + "source_code": "def old_bound_to_new(bounds):\n lb, ub = zip(*bounds)\n lb = np.array([float(_arr_to_scalar(x)) if x is not None else -np.inf for x in lb])\n ub = np.array([float(_arr_to_scalar(x)) if x is not None else np.inf for x in ub])\n return (lb, ub)", + "docstring": "Convert the old bounds representation to the new one. The new representation is a tuple (lb, ub) and the old one is a list containing n tuples, ith containing lower and upper bound on a ith variable. If any of the entries in lb/ub are None they are replaced by -np.inf/np.inf.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_constraints.py", + "ast_data": "FunctionDef name:old_bound_to_new arg:bounds arguments arg Assign Call Assign Call Compare Call Call Assign Call Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_assert_nodes_are_present", + "source_code": "def _assert_nodes_are_present(name_to_node, nodes):\n for d in nodes:\n assert d in name_to_node, '%s is not in graph' % d", + "docstring": "Assert that nodes are present in the graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_util_impl.py", + "ast_data": "FunctionDef name:_assert_nodes_are_present arg:name_to_node arg:nodes arguments arg arg For Compare" + }, + { + "library": "pytorch", + "name": "get_default_args_for_class", + "source_code": "def get_default_args_for_class(cls):\n methods = inspect.getmembers(cls, predicate=lambda m: (inspect.ismethod(m) or inspect.isfunction(m)) and (not is_static_fn(cls, m.__name__)) and (m.__name__ in cls.__dict__))\n defaults = {method_name: get_default_args(method_impl) for method_name, method_impl in methods}\n return defaults", + "docstring": "Get default arguments for all methods in a class (except for static methods). Args: cls: type - The class type to inspect for default arguments. 
Returns: A Dict[str, Dict[str, Any]] which maps each method name to a Dict[str, Any] that maps each argument name to its default value.", + "type": "function", + "file_path": "pytorch\\torch\\jit\\frontend.py", + "ast_data": "FunctionDef name:get_default_args_for_class arg:cls arguments arg Assign Call arguments arg BoolOp BoolOp Call Call Call Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_device_dict_and_cores", + "source_code": "@staticmethod\ndef _get_device_dict_and_cores(devices):\n device_map = collections.defaultdict(list)\n num_cores = 0\n for device in devices:\n match = _TPU_DEVICE_REGEX.match(device.name)\n if match:\n host_id = match.group('host_id')\n core_id = match.group('core_id')\n device_map[host_id].append(core_id)\n num_cores += 1\n return DeviceDetails(device_map, num_cores)", + "docstring": "Returns a dict of hosts to cores and total cores given devices names. Returns a namedtuple with two attributes: device_map: A map of host_ids to a list of core_ids. total_cores: The total number of cores within the TPU system. Args: devices: A list of devices returned by session.list_devices()", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py", + "ast_data": "FunctionDef name:_get_device_dict_and_cores arg:devices arguments arg Assign Call Assign For Assign Call If Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "make_png", + "source_code": "@classmethod\ndef make_png(cls, tex, fontsize, dpi):\n basefile = cls.get_basefile(tex, fontsize, dpi)\n pngfile = '%s.png' % basefile\n if not os.path.exists(pngfile):\n dvifile = cls.make_dvi(tex, fontsize)\n cmd = ['dvipng', '-bg', 'Transparent', '-D', str(dpi), '-T', 'tight', '-o', pngfile, dvifile]\n if getattr(mpl, '_called_from_pytest', False) and mpl._get_executable_info('dvipng').raw_version != '1.16':\n cmd.insert(1, '--freetype0')\n cls._run_checked_subprocess(cmd, tex)\n return pngfile", + "docstring": "Generate a png file containing latex's rendering of tex string. Return the file name.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py", + "ast_data": "FunctionDef name:make_png arg:cls arg:tex arg:fontsize arg:dpi arguments arg arg arg arg Assign Call Assign If Call Assign Call Assign Call If BoolOp Call Compare Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_normalize_pred", + "source_code": "def _normalize_pred(pred):\n pred = ops.convert_to_tensor(pred)\n if tensor_util.is_tf_type(pred) and (pred.shape.dims is None or pred.shape.dims):\n pred = array_ops.squeeze_v2(pred)\n return pred", + "docstring": "Normalize the predicate to a scalar tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py", + "ast_data": "FunctionDef name:_normalize_pred arg:pred arguments arg Assign Call If BoolOp Call BoolOp Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "resolve", + "source_code": "@deprecated('`resolve()` is deprecated, use `dispatch(*types)`', category=FutureWarning)\ndef resolve(self, types):\n return self.dispatch(*types)", + "docstring": "Determine appropriate implementation for this type signature .. 
deprecated:: 0.4.4 Use `` instead", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py", + "ast_data": "FunctionDef name:resolve arg:self arg:types arguments arg arg Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "get_jwks", + "source_code": "def get_jwks(self):\n raise NotImplementedError()", + "docstring": "Return the JWKs that will be used to check the JWT access token signature. Developers MUST re-implement this method:: def get_jwks(self): return load_jwks(\"jwks.json\")", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc9068\\introspection.py", + "ast_data": "FunctionDef name:get_jwks arg:self arguments arg Raise Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, key_dtype, value_dtype, default_value, name='MutableHashTable', checkpoint=True, experimental_is_anonymous=False):\n self._default_value = ops.convert_to_tensor(default_value, dtype=value_dtype)\n self._value_shape = self._default_value.get_shape()\n self._checkpoint = checkpoint\n self._key_dtype = key_dtype\n self._value_dtype = value_dtype\n self._name = name\n self._is_anonymous = experimental_is_anonymous\n if not self._is_anonymous:\n self._shared_name = None\n if context.executing_eagerly():\n self._shared_name = 'table_%d' % (ops.uid(),)\n super(MutableHashTable, self).__init__(key_dtype, value_dtype)\n self._resource_handle = self._create_resource()\n if checkpoint:\n saveable = MutableHashTable._Saveable(self, name)\n if not context.executing_eagerly():\n ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)", + "docstring": "Creates an empty object. Creates a table, the type of its keys and values are specified by key_dtype and value_dtype, respectively. Args: key_dtype: the type of the key tensors. value_dtype: the type of the value tensors. default_value: The value to use if a key is missing in the table. name: A name for the operation (optional). checkpoint: if True, the contents of the table are saved to and restored from checkpoints. If is empty for a checkpointed table, it is shared using the table node name. experimental_is_anonymous: Whether to use anonymous mode for the table (default is False). In anonymous mode, the table resource can only be accessed via a resource handle. It can't be looked up by a name. When all resource handles pointing to that resource are gone, the resource will be deleted automatically. Returns: A object. 
Raises: ValueError: If checkpoint is True and no name was specified.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:key_dtype arg:value_dtype arg:default_value arg:name arg:checkpoint arg:experimental_is_anonymous arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Assign Assign Assign Assign If Assign If Call Assign Call Call Call Assign Call If Assign Call If Call Call" + }, + { + "library": "tensorflow", + "name": "captures", + "source_code": "@property\ndef captures(self) -> collections.OrderedDict:\n return self._captures", + "docstring": "Returns an ordered mapping of capture id to type.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py", + "ast_data": "FunctionDef name:captures arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "disable_multi_worker", + "source_code": "def disable_multi_worker(method):\n\n def _method_wrapper(self, *args, **kwargs):\n if self._in_multi_worker_mode():\n raise ValueError('{} is not supported in multi-worker mode.'.format(method.__name__))\n return method(self, *args, **kwargs)\n return tf_decorator.make_decorator(target=method, decorator_func=_method_wrapper)", + "docstring": "Decorator that disallows multi-worker use of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:disable_multi_worker arg:method arguments arg FunctionDef name:_method_wrapper arg:self arguments arg arg arg If Call Raise Call Call Return return:yes Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "toflex", + "source_code": "def toflex(self):\n ddtype = self.dtype\n _mask = self._mask\n if _mask is None:\n _mask = make_mask_none(self.shape, ddtype)\n mdtype = self._mask.dtype\n record = np.ndarray(shape=self.shape, dtype=[('_data', ddtype), ('_mask', mdtype)])\n record['_data'] = self._data\n record['_mask'] = self._mask\n return record", + "docstring": "Transforms a masked array into a flexible-type array. The flexible type array that is returned will have two fields: * the `ndarrayndarray`, ...) will be lost. 
Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.toflex() array([[(1, False), (2, True), (3, False)], [(4, True), (5, False), (6, True)], [(7, False), (8, True), (9, False)]], dtype=[('_data', ' bytes:\n curve = public_key.curve\n if curve.name not in _ECDSA_KEY_TYPE:\n raise ValueError(f'Unsupported curve for ssh private key: {curve.name!r}')\n return _ECDSA_KEY_TYPE[curve.name]", + "docstring": "Return SSH key_type and curve_name for private key.", + "type": "function", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py", + "ast_data": "FunctionDef name:_ecdsa_key_type arg:public_key arguments arg Assign If Compare Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_gen_gradient_func", + "source_code": "def _gen_gradient_func(func):\n\n def gradient_func(unused_op, *result_grads):\n\n def none_to_zero(x, t):\n if x is not None:\n return x\n shape, dtype = default_gradient.shape_and_dtype(t)\n if shape.is_fully_defined():\n return default_gradient.zeros_like(t)\n dims = []\n if shape.rank is not None:\n dims = [1 if d is None else d for d in shape.as_list()]\n return array_ops.zeros(dims, dtype)\n result_grads = [none_to_zero(x, t) for x, t in zip(result_grads, func.graph.inputs)]\n return func(*result_grads)\n return gradient_func", + "docstring": "Wraps a deserialized function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py", + "ast_data": "FunctionDef name:_gen_gradient_func arg:func arguments arg FunctionDef name:gradient_func arg:unused_op arguments arg arg FunctionDef name:none_to_zero arg:x arg:t arguments arg arg If Compare Return return:yes Assign Call If Call Return return:yes Call Assign If Compare Assign Compare Call Return return:yes Call Assign Call Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "compilation_metric", + "source_code": "@staticmethod\ndef compilation_metric(overwrite: bool=False, **metadata: object):\n CompileEventLogger.add_toplevel(CompileEventLogLevel.COMPILATION_METRIC, overwrite, **metadata)", + "docstring": "Add to the CompilationMetrics context. Also logs to PT2 Compile Events and chromium. Each key/value of metadata will appear in the chromium trace. 
Each kwarg name becomes a column in PT2 Compile Events and Dynamo Compile, with the corresponding kwarg value.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:compilation_metric arg:overwrite arguments arg arg Call" + }, + { + "library": "tensorflow", + "name": "get_config", + "source_code": "def get_config(self):\n config = dict(zip(self._fields, self))\n config['normalizer_fn'] = serialization._serialize_keras_object(self.normalizer_fn)\n config['dtype'] = self.dtype.name\n return config", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "StrongHashSpec", + "source_code": "class StrongHashSpec(HasherSpec):\n __slots__ = ()\n\n def __new__(cls, key):\n if len(key) != 2:\n raise ValueError(f'`key` must have size 2, received {len(key)}')\n if not isinstance(key[0], compat_util.integral_types) or not isinstance(key[1], compat_util.integral_types):\n raise TypeError('Invalid key %s. Must be unsigned integer values.' % key)\n return super(cls, StrongHashSpec).__new__(cls, 'stronghash', key)", + "docstring": "A structure to specify a key of the strong keyed hash spec. The strong hash requires a , which is a list of 2 unsigned integer numbers. These should be non-zero; random numbers generated from random.org would be a fine choice. Fields: key: The key to be used by the keyed hashing function.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "ClassDef name:StrongHashSpec Assign FunctionDef name:__new__ arg:cls arg:key arguments arg arg If Compare Call Raise Call Call If BoolOp Call Call Raise Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "update_state", + "source_code": "def update_state(self, y_true, y_pred, sample_weight=None):\n y_true = self._conform_to_outputs(y_pred, y_true)\n sample_weight = self._conform_to_outputs(y_pred, sample_weight)\n if not self._built:\n self.build(y_pred, y_true)\n y_pred = nest.flatten(y_pred)\n y_true = nest.flatten(y_true) if y_true is not None else []\n sample_weight = nest.flatten(sample_weight)\n zip_args = (y_true, y_pred, sample_weight, self._metrics, self._weighted_metrics)\n for y_t, y_p, sw, metric_objs, weighted_metric_objs in zip(*zip_args):\n if y_t is None or (all((m is None for m in metric_objs)) and all((wm is None for wm in weighted_metric_objs))):\n continue\n y_t, y_p, sw = match_dtype_and_rank(y_t, y_p, sw)\n mask = get_mask(y_p)\n sw = apply_mask(y_p, sw, mask)\n for metric_obj in metric_objs:\n if metric_obj is None:\n continue\n metric_obj.update_state(y_t, y_p, sample_weight=mask)\n for weighted_metric_obj in weighted_metric_objs:\n if weighted_metric_obj is None:\n continue\n weighted_metric_obj.update_state(y_t, y_p, sample_weight=sw)", + "docstring": "Updates the state of per-output metrics.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py", + "ast_data": "FunctionDef name:update_state arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Assign Call Assign Call If Call Assign Call Assign Compare Call Assign Call Assign For Call If BoolOp Compare BoolOp Call Compare Call Compare Assign Call Assign Call Assign Call For If Compare Call For If Compare 
Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, ax, *args, **kwargs):\n super().__init__(ax, *args, **kwargs)", + "docstring": "Draw triangular grid contour lines or filled regions, depending on whether keyword arg *filled* is False (default) or True. The first argument of the initializer must be an object. The remaining arguments and keyword arguments are described in the docstring of .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_tricontour.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:ax arguments arg arg arg arg Call Call" + }, + { + "library": "authlib", + "name": "prepare_headers", + "source_code": "def prepare_headers(oauth_params, headers=None, realm=None):\n headers = headers or {}\n header_parameters = ', '.join([f'{escape(k)}=\"{escape(v)}\"' for k, v in oauth_params if k.startswith('oauth_')])\n if realm:\n header_parameters = f'realm=\"{realm}\", ' + header_parameters\n headers['Authorization'] = f'OAuth {header_parameters}'\n return headers", + "docstring": "**Prepare the Authorization header.** Per _ of the spec. Protocol parameters can be transmitted using the HTTP \"Authorization\" header field as defined by _ with the auth-scheme name set to \"OAuth\" (case insensitive). For example:: Authorization: OAuth realm=\"Photos\", oauth_consumer_key=\"dpf43f3p2l4k3l03\", oauth_signature_method=\"HMAC-SHA1\", oauth_timestamp=\"137131200\", oauth_nonce=\"wIjqoS\", oauth_callback=\"http%3A%2F%2Fprinter.example.com%2Fready\", oauth_signature=\"74KNZJeDHnMBp0EMJ9ZHt%2FXKycU%3D\", oauth_version=\"1.0\" .. _: .. _:", + "type": "function", + "file_path": "authlib\\authlib\\oauth1\\rfc5849\\parameters.py", + "ast_data": "FunctionDef name:prepare_headers arg:oauth_params arg:headers arg:realm arguments arg arg arg Assign BoolOp Assign Call Call Call Call If Assign Assign Return return:yes" + }, + { + "library": "sphinx", + "name": "write_doc_serialized", + "source_code": "def write_doc_serialized(self, docname: str, doctree: nodes.document) -> None:\n pass", + "docstring": "Handle parts of write_doc that must be called in the main process if parallel build is active.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\__init__.py", + "ast_data": "FunctionDef name:write_doc_serialized arg:self arg:docname arg:doctree arguments arg arg arg" + }, + { + "library": "matplotlib", + "name": "set_child", + "source_code": "def set_child(self, child):\n self._child = child\n if child is not None:\n child.axes = self.axes\n self.stale = True", + "docstring": "Set the child to be anchored.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "FunctionDef name:set_child arg:self arg:child arguments arg arg Assign If Compare Assign Assign" + }, + { + "library": "kornia", + "name": "trans_z", + "source_code": "@classmethod\ndef trans_z(cls, z: Tensor) -> Se3:\n zs = zeros_like(z)\n return cls.trans(zs, zs, z)", + "docstring": "Construct a z-axis translation. 
Args: z: the z-axis translation.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py", + "ast_data": "FunctionDef name:trans_z arg:cls arg:z arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "global_variables_initializer", + "source_code": "@tf_export(v1=['initializers.global_variables', 'global_variables_initializer'])\ndef global_variables_initializer():\n if context.executing_eagerly():\n return control_flow_ops.no_op(name='global_variables_initializer')\n return variables_initializer(global_variables())", + "docstring": "Returns an Op that initializes global variables. This is just a shortcut for @compatibility(TF2) In TF2, variables are initialized immediately when they are created. There is no longer a need to run variable initializers before using them. @end_compatibility Returns: An Op that initializes global variables in the graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:global_variables_initializer arguments If Call Return return:yes Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_list_profile_filter", + "source_code": "def _list_profile_filter(profile_datum, node_name_regex, file_path_regex, op_type_regex, op_time_interval, exec_time_interval, min_lineno=-1, max_lineno=-1):\n if node_name_regex and (not node_name_regex.match(profile_datum.node_exec_stats.node_name)):\n return False\n if file_path_regex:\n if not profile_datum.file_path or not file_path_regex.match(profile_datum.file_path):\n return False\n if min_lineno > 0 and profile_datum.line_number and (profile_datum.line_number < min_lineno):\n return False\n if max_lineno > 0 and profile_datum.line_number and (profile_datum.line_number >= max_lineno):\n return False\n if profile_datum.op_type is not None and op_type_regex and (not op_type_regex.match(profile_datum.op_type)):\n return False\n if op_time_interval is not None and (not op_time_interval.contains(profile_datum.op_time)):\n return False\n if exec_time_interval and (not exec_time_interval.contains(profile_datum.node_exec_stats.all_end_rel_micros)):\n return False\n return True", + "docstring": "Filter function for list_profile command. Args: profile_datum: A object. node_name_regex: Regular expression pattern object to filter by name. file_path_regex: Regular expression pattern object to filter by file path. op_type_regex: Regular expression pattern object to filter by op type. op_time_interval: for filtering op time. exec_time_interval: for filtering exec time. min_lineno: Lower bound for 1-based line number, inclusive. If <= 0, has no effect. max_lineno: Upper bound for 1-based line number, exclusive. If <= 0, has no effect. # TODO(cais): Maybe filter by function name. 
Returns: True iff profile_datum should be included.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\profile_analyzer_cli.py", + "ast_data": "FunctionDef name:_list_profile_filter arg:profile_datum arg:node_name_regex arg:file_path_regex arg:op_type_regex arg:op_time_interval arg:exec_time_interval arg:min_lineno arg:max_lineno arguments arg arg arg arg arg arg arg arg If BoolOp Call Return return:yes If If BoolOp Call Return return:yes If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Return return:yes If BoolOp Compare Call Return return:yes If BoolOp Compare Call Return return:yes If BoolOp Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "DistributedDatasetSpec", + "source_code": "class DistributedDatasetSpec(DistributedDatasetAndIteratorSpec):\n\n @property\n def value_type(self):\n return DistributedDataset\n\n @property\n def _component_specs(self):\n specs = []\n worker_device_pairs = self._input_workers._worker_device_pairs\n for i, _ in enumerate(worker_device_pairs):\n element_spec = nest.map_structure(functools.partial(_replace_per_replica_spec, i=i), self._element_spec)\n specs.append(dataset_ops.DatasetSpec(element_spec))\n return specs\n\n def _to_components(self, value):\n return value._cloned_datasets\n\n def _from_components(self, components):\n return DistributedDataset(input_workers=self._input_workers, strategy=self._strategy, components=components, element_spec=self._element_spec, enable_get_next_as_optional=self._enable_get_next_as_optional, options=self._options, replica_order=self._replica_order)\n\n @staticmethod\n def from_value(value):\n return DistributedDatasetSpec(value._input_workers, value._element_spec, value._strategy, value._options, enable_get_next_as_optional=value._enable_get_next_as_optional)", + "docstring": "Type specification for `DistributedDataset.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", + "ast_data": "ClassDef name:DistributedDatasetSpec FunctionDef name:value_type arg:self arguments arg Return return:yes FunctionDef name:_component_specs arg:self arguments arg Assign Assign For Call Assign Call Call Call Call Return return:yes FunctionDef name:_to_components arg:self arg:value arguments arg arg Return return:yes FunctionDef name:_from_components arg:self arg:components arguments arg arg Return return:yes Call FunctionDef name:from_value arg:value arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "__init__", + "source_code": "def __init__(self, dim=None, seed=None):\n self._dist = uniform_direction_gen(seed)\n self.dim = self._dist._process_parameters(dim)", + "docstring": "Create a frozen n-dimensional uniform direction distribution. Parameters ---------- dim : int Dimension of matrices seed : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `seedseed` instance then that instance is used. 
Examples -------- >>> from scipy.stats import uniform_direction >>> x = uniform_direction(3) >>> x.rvs()", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:dim arg:seed arguments arg arg arg Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "_CTCLossV2Grad", + "source_code": "@ops.RegisterGradient('CTCLossV2')\ndef _CTCLossV2Grad(op, grad_loss, _):\n return _CTCLossGradImpl(op, grad_loss, _)", + "docstring": "The derivative provided by CTC Loss V2. Args: op: the CTCLossV2 op. grad_loss: The backprop for cost. Returns: The CTC Loss V2 gradient.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ctc_ops.py", + "ast_data": "FunctionDef name:_CTCLossV2Grad arg:op arg:grad_loss arg:_ arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "get_authorization_code_challenge_method", + "source_code": "def get_authorization_code_challenge_method(self, authorization_code):\n return authorization_code.code_challenge_method", + "docstring": "Get \"code_challenge_method\" associated with this authorization code. Developers MAY re-implement it in subclass, the default logic:: def get_authorization_code_challenge_method(self, authorization_code): return authorization_code.code_challenge_method :param authorization_code: the instance of authorization_code", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7636\\challenge.py", + "ast_data": "FunctionDef name:get_authorization_code_challenge_method arg:self arg:authorization_code arguments arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_start", + "source_code": "@abc.abstractmethod\ndef _start(self) -> None:\n raise NotImplementedError", + "docstring": "Start processes using strategy defined in a particular context.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py", + "ast_data": "FunctionDef name:_start arg:self arguments arg Raise" + }, + { + "library": "pandas", + "name": "__len__", + "source_code": "def __len__(self) -> int:\n return len(self._pa_array)", + "docstring": "Length of this array. Returns ------- length : int", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\string_arrow.py", + "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "calculate_out_dimension", + "source_code": "def calculate_out_dimension(d_in, module_instance, index):\n padding = (module_instance.padding, module_instance.padding) if isinstance(module_instance.padding, int) else module_instance.padding\n kernel_size = (module_instance.kernel_size, module_instance.kernel_size) if isinstance(module_instance.kernel_size, int) else module_instance.kernel_size\n stride = (module_instance.stride, module_instance.stride) if isinstance(module_instance.stride, int) else module_instance.stride\n dilation = (module_instance.dilation, module_instance.dilation) if isinstance(module_instance.dilation, int) else module_instance.dilation\n DIMENSION_TYPES = (int, sympy.Symbol)\n if d_in == Dyn:\n return Dyn\n elif isinstance(d_in, DIMENSION_TYPES):\n n = d_in + 2 * padding[index] - dilation[index] * (kernel_size[index] - 1) - 1\n return n // stride[0] + 1\n else:\n raise TypeError(f'{d_in} in {module_instance} must be a number or Dyn. 
Received {type(d_in)}')", + "docstring": "For calculating h_in and w_out according to the conv2D documentation", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py", + "ast_data": "FunctionDef name:calculate_out_dimension arg:d_in arg:module_instance arg:index arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign If Compare Return return:yes If Call Assign Return return:yes Raise Call Call" + }, + { + "library": "matplotlib", + "name": "plasma", + "source_code": "def plasma() -> None:\n set_cmap('plasma')", + "docstring": "Set the colormap to 'plasma'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:plasma arguments Call" + }, + { + "library": "tensorflow", + "name": "_test_runner", + "source_code": "def _test_runner(test_id, test_env):\n global _running_in_worker, _env\n _running_in_worker = True\n _env = test_env\n test = unittest.defaultTestLoader.loadTestsFromName(test_id)\n runner = unittest.TextTestRunner()\n result = runner.run(test)\n failures = result.failures + result.expectedFailures + result.errors\n if failures:\n ret = _TestResult(status='failure', message=failures[0][1])\n elif result.skipped:\n ret = _TestResult(status='skipped', message=result.skipped[0][1])\n else:\n ret = _TestResult(status='ok', message=None)\n if ret.message:\n print(ret.message)\n return ret", + "docstring": "Executes the test with the given test_id. This is a simple wrapper around TestRunner to be used with multi_process_runner. Similar to test.main(), but it executes only one test specified by test_id and returns whether the test succeeds. If the test fails, the function prints failures and errors to stdout. Args: test_id: TestCase.id() test_env: a TestEnvironment object. 
Returns: A boolean indicates whether the test succeeds.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py", + "ast_data": "FunctionDef name:_test_runner arg:test_id arg:test_env arguments arg arg Assign Assign Assign Call Assign Call Assign Call Assign If Assign Call If Assign Call Assign Call If Call Return return:yes" + }, + { + "library": "pytorch", + "name": "extract_subgraph", + "source_code": "def extract_subgraph(orig_module: nn.Module, nodes: list[fx.Node], inputs: list[fx.Node], outputs: list[fx.Node]):\n new_graph = fx.Graph()\n env: dict[fx.Node, fx.Node] = {}\n for input in inputs:\n new_node = new_graph.placeholder(input.name)\n env[input] = new_node\n for node in nodes:\n new_node = new_graph.node_copy(node, lambda x: env[x])\n env[node] = new_node\n new_graph.output([env[output] for output in outputs])\n new_graph.lint()\n return fx.GraphModule(orig_module, new_graph)", + "docstring": "Given lists of nodes from an existing graph that represent a subgraph, returns a submodule that executes that subgraph.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\optimization.py", + "ast_data": "FunctionDef name:extract_subgraph arg:orig_module arg:nodes arg:inputs arg:outputs arguments arg arg arg arg Assign Call For Assign Call Assign For Assign Call arguments arg Assign Call Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "format", + "source_code": "def format(self, tree):\n output = []\n\n def ascend(branch, depth=1):\n for parent, grandparents in branch:\n output.append(' ' * depth + self._format(parent))\n if grandparents:\n ascend(grandparents, depth + 1)\n ascend(tree)\n return output", + "docstring": "Return a list of string reprs from a nested list of referrers.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\gctools.py", + "ast_data": "FunctionDef name:format arg:self arg:tree arguments arg arg Assign FunctionDef name:ascend arg:branch arg:depth arguments arg arg For Call Call If Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "should_trigger_for_step", + "source_code": "def should_trigger_for_step(self, step):\n if self._last_triggered_step is None:\n return True\n if self._last_triggered_step == step:\n return False\n if self._every_secs is not None:\n if time.time() >= self._last_triggered_time + self._every_secs:\n return True\n if self._every_steps is not None:\n if step >= self._last_triggered_step + self._every_steps:\n return True\n return False", + "docstring": "Return true if the timer should trigger for the specified step. Args: step: Training step to trigger on. Returns: True if the difference between the current time and the time of the last trigger exceeds , or if the difference between the current step and the last triggered step exceeds . 
False otherwise.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py", + "ast_data": "FunctionDef name:should_trigger_for_step arg:self arg:step arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare If Compare Call Return return:yes If Compare If Compare Return return:yes Return return:yes" + }, + { + "library": "scrapy", + "name": "clientConnectionFailed", + "source_code": "def clientConnectionFailed(self, _, reason):\n if self.waiting:\n self.waiting = 0\n self._disconnectedDeferred.callback(None)\n self.deferred.errback(reason)", + "docstring": "When a connection attempt fails, the request cannot be issued. If no result has yet been provided to the result Deferred, provide the connection failure reason as an error result.", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\downloader\\webclient.py", + "ast_data": "FunctionDef name:clientConnectionFailed arg:self arg:_ arg:reason arguments arg arg arg If Assign Call Call" + }, + { + "library": "scipy", + "name": "f4", + "source_code": "def f4(x):\n if x > 1:\n return 1.0 + 0.1 * x\n if x < 1:\n return -1.0 + 0.1 * x\n return 0", + "docstring": "Piecewise linear, left- and right- discontinuous at x=1, the root.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_tstutils.py", + "ast_data": "FunctionDef name:f4 arg:x arguments arg If Compare Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "autoescape", + "source_code": "@register.tag\ndef autoescape(parser, token):\n args = token.contents.split()\n if len(args) != 2:\n raise TemplateSyntaxError(\"'autoescape' tag requires exactly one argument.\")\n arg = args[1]\n if arg not in ('on', 'off'):\n raise TemplateSyntaxError(\"'autoescape' argument should be 'on' or 'off'\")\n nodelist = parser.parse(('endautoescape',))\n parser.delete_first_token()\n return AutoEscapeControlNode(arg == 'on', nodelist)", + "docstring": "Force autoescape behavior for this block.", + "type": "function", + "file_path": "django\\django\\template\\defaulttags.py", + "ast_data": "FunctionDef name:autoescape arg:parser arg:token arguments arg arg Assign Call If Compare Call Raise Call Assign If Compare Raise Call Assign Call Call Return return:yes Call Compare" + }, + { + "library": "tensorflow", + "name": "survival_function", + "source_code": "def survival_function(self, value, name='survival_function'):\n return self._call_survival_function(value, name)", + "docstring": "Survival function. Given random variable , the survival function is defined: Args: value: or . name: Python prepended to names of ops created by this function. Returns: of shape with values of type .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py", + "ast_data": "FunctionDef name:survival_function arg:self arg:value arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "describe_null", + "source_code": "@property\n@abstractmethod\ndef describe_null(self) -> tuple[ColumnNullType, Any]:\n pass", + "docstring": "Return the missing value (or \"null\") representation the column dtype uses, as a tuple ``. Value : if kind is \"sentinel value\", the actual value. If kind is a bit mask or a byte mask, the value (0 or 1) indicating a missing value. 
None otherwise.", + "type": "method", + "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py", + "ast_data": "FunctionDef name:describe_null arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "_call_location", + "source_code": "def _call_location():\n frame = tf_inspect.currentframe()\n assert frame.f_back.f_code.co_name == '_tfmw_add_deprecation_warning', 'This function should be called directly from _tfmw_add_deprecation_warning, as the caller is identified heuristically by chopping off the top stack frames.'\n for _ in range(3):\n parent = frame.f_back\n if parent is None:\n break\n frame = parent\n return '{}:{}'.format(frame.f_code.co_filename, frame.f_lineno)", + "docstring": "Extracts the caller filename and line number as a string. Returns: A string describing the caller source location.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\module_wrapper.py", + "ast_data": "FunctionDef name:_call_location arguments Assign Call Compare For Call Assign If Compare Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "parse_et_yaml_struct", + "source_code": "def parse_et_yaml_struct(es: object) -> ETKernelIndex:\n indices: dict[OperatorName, dict[ETKernelKey, BackendMetadata]] = {}\n for ei in es:\n e = ei.copy()\n funcs = e.pop('func')\n assert isinstance(funcs, str), f'not a str: {funcs}'\n namespace_helper = NamespaceHelper.from_namespaced_entity(namespaced_entity=funcs, max_level=1)\n opname = FunctionSchema.parse(namespace_helper.entity_name).name\n assert opname not in indices, f'Duplicate func found in yaml: {opname} already'\n if len((index := parse_from_yaml(e))) != 0:\n indices[opname] = index\n return ETKernelIndex(indices)", + "docstring": "Given a loaded yaml representing a list of operators, for each op extract the mapping of to (the latter representing the kernel instance that should be used by the kernel key).", + "type": "function", + "file_path": "pytorch\\torchgen\\executorch\\parse.py", + "ast_data": "FunctionDef name:parse_et_yaml_struct arg:es arguments arg For Assign Call Assign Call Call Assign Call Assign Call Compare If Compare Call Call Assign Return return:yes Call" + }, + { + "library": "kornia", + "name": "denormalize_pixel_coordinates3d", + "source_code": "def denormalize_pixel_coordinates3d(pixel_coordinates: Tensor, depth: int, height: int, width: int, eps: float=1e-08) -> Tensor:\n if pixel_coordinates.shape[-1] != 3:\n raise ValueError(f'Input pixel_coordinates must be of shape (*, 3). Got {pixel_coordinates.shape}')\n dhw: Tensor = stack([tensor(depth), tensor(width), tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)\n factor: Tensor = tensor(2.0) / (dhw - 1).clamp(eps)\n return tensor(1.0) / factor * (pixel_coordinates + 1)", + "docstring": "Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates: the normalized grid coordinates. Shape can be :math:. depth: the maximum depth in the x-axis. height: the maximum height in the y-axis. width: the maximum width in the x-axis. eps: safe division by zero. 
Return: the denormalized pixel coordinates.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:denormalize_pixel_coordinates3d arg:pixel_coordinates arg:depth arg:height arg:width arg:eps arguments arg arg arg arg arg If Compare Raise Call Call Call Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "infer_interface_methods_to_compile", + "source_code": "def infer_interface_methods_to_compile(nn_module):\n stubs = [make_stub_from_method(nn_module, method) for method in mod_interface.getMethodNames()]\n return stubs", + "docstring": "Rule to infer the methods from the interface type. It is used to know which methods need to act as starting points for compilation.", + "type": "function", + "file_path": "pytorch\\torch\\jit\\_recursive.py", + "ast_data": "FunctionDef name:infer_interface_methods_to_compile arg:nn_module arguments arg Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_conform_to_outputs", + "source_code": "def _conform_to_outputs(self, outputs, struct):\n struct = map_to_output_names(outputs, self._output_names, struct)\n struct = map_missing_dict_keys(outputs, struct)\n if not nest.is_nested(struct) and nest.is_nested(outputs):\n struct = nest.map_structure(lambda _: struct, outputs)\n return struct", + "docstring": "Convenience method to conform to structure. Mappings performed: (1) Map a dict to a list of outputs, using the output names. (2) Fill missing keys in a dict w/ s. (3) Map a single item to all outputs. Args: outputs: Model predictions. struct: Arbitrary nested structure (e.g. of labels, sample_weights, losses, or metrics). Returns: Mapping of to structure.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py", + "ast_data": "FunctionDef name:_conform_to_outputs arg:self arg:outputs arg:struct arguments arg arg arg Assign Call Assign Call If BoolOp Call Call Assign Call arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "OutputComparisonLogger", + "source_code": "class OutputComparisonLogger(OutputLogger):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.comparison_fn = torch.ao.ns.fx.utils.compute_sqnr\n self.comparison_fn_name = 'sqnr'\n self.comparisons = []\n\n def forward(self, x, x_ref):\n if not self.enabled:\n return x\n assert isinstance(x, torch.Tensor), 'non-tensor inputs not yet supported'\n if self.save_activations:\n self.stats.append(x.detach())\n self.comparisons.append(self.comparison_fn(x, x_ref))\n return x\n\n def __repr__(self):\n clean_dict = {k: v for k, v in self.__dict__.items() if k != 'training' and (not k.startswith('_'))}\n return f'OutputComparisonLogger({clean_dict})'", + "docstring": "Same as OutputLogger, but also requires the original activation in order to calculate the comparison at calibration time", + "type": "class", + "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite_fx.py", + "ast_data": "ClassDef name:OutputComparisonLogger FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Assign Assign FunctionDef name:forward arg:self arg:x arg:x_ref arguments arg arg arg If Return return:yes Call If Call Call Call Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Assign Call BoolOp Compare Call Return return:yes" + }, + { + "library": "pytorch", + "name": "benchmark_codegened_module", + "source_code": "def benchmark_codegened_module(self, 
module: ModuleType, device: torch.device) -> tuple[float, str]:\n self.current_device = device\n backend = self.get_backend(device)\n with dynamo_timed('benchmark_fused_nodes'):\n return backend.benchmark_codegened_module(module)", + "docstring": "Benchmark fused list of nodes and return the execution time in milliseconds on randomly generated inputs.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:benchmark_codegened_module arg:self arg:module arg:device arguments arg arg arg Assign Assign Call With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "start_standard_services", + "source_code": "def start_standard_services(self, sess):\n if not self._is_chief:\n raise RuntimeError('Only chief supervisor can start standard services. Because only chief supervisors can write events.')\n if not self._logdir:\n logging.warning(\"Standard services need a 'logdir' passed to the SessionManager\")\n return\n if self._global_step is not None and self._summary_writer:\n current_step = training_util.global_step(sess, self._global_step)\n self._summary_writer.add_session_log(SessionLog(status=SessionLog.START), current_step)\n threads = []\n if self._save_summaries_secs and self._summary_writer:\n if self._summary_op is not None:\n threads.append(SVSummaryThread(self, sess))\n if self._global_step is not None:\n threads.append(SVStepCounterThread(self, sess))\n if self.saver and self._save_model_secs:\n threads.append(SVTimerCheckpointThread(self, sess))\n for t in threads:\n t.start()\n return threads", + "docstring": "Start the standard services for 'sess'. This starts services in the background. The services started depend on the parameters to the constructor and may include: - A Summary thread computing summaries every save_summaries_secs. - A Checkpoint thread saving the model every save_model_secs. - A StepCounter thread measuring step time. Args: sess: A Session. Returns: A list of threads that are running the standard services. You can use the Supervisor's Coordinator to join these threads with: sv.coord.Join() Raises: RuntimeError: If called with a non-chief Supervisor. ValueError: If no logdir was passed to the constructor as the services need a log directory.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", + "ast_data": "FunctionDef name:start_standard_services arg:self arg:sess arguments arg arg If Raise Call If Call Return return:no If BoolOp Compare Assign Call Call Call Assign If BoolOp If Compare Call Call If Compare Call Call If BoolOp Call Call For Call Return return:yes" + }, + { + "library": "scipy", + "name": "entropy", + "source_code": "def entropy(self, alpha):\n alpha = _dirichlet_check_parameters(alpha)\n alpha0 = np.sum(alpha)\n lnB = _lnB(alpha)\n K = alpha.shape[0]\n out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum((alpha - 1) * scipy.special.psi(alpha))\n return _squeeze_output(out)", + "docstring": "Differential entropy of the Dirichlet distribution. 
Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- h : scalar Entropy of the Dirichlet distribution", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:entropy arg:self arg:alpha arguments arg arg Assign Call Assign Call Assign Call Assign Assign Call Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_get_timestamp_range_edges", + "source_code": "def _get_timestamp_range_edges(first: Timestamp, last: Timestamp, freq: BaseOffset, unit: str, closed: Literal['right', 'left']='left', origin: TimeGrouperOrigin='start_day', offset: Timedelta | None=None) -> tuple[Timestamp, Timestamp]:\n if isinstance(freq, Tick):\n index_tz = first.tz\n if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None):\n raise ValueError('The origin must have the same timezone as the index.')\n if origin == 'epoch':\n origin = Timestamp('1970-01-01', tz=index_tz)\n if isinstance(freq, Day):\n first = first.tz_localize(None)\n last = last.tz_localize(None)\n if isinstance(origin, Timestamp):\n origin = origin.tz_localize(None)\n first, last = _adjust_dates_anchored(first, last, freq, closed=closed, origin=origin, offset=offset, unit=unit)\n if isinstance(freq, Day):\n first = first.tz_localize(index_tz)\n last = last.tz_localize(index_tz, nonexistent='shift_forward')\n else:\n first = first.normalize()\n last = last.normalize()\n if closed == 'left':\n first = Timestamp(freq.rollback(first))\n else:\n first = Timestamp(first - freq)\n last = Timestamp(last + freq)\n return (first, last)", + "docstring": "Adjust the Timestamp to the preceding Timestamp that resides on the provided offset. Adjust the Timestamp to the following Timestamp that resides on the provided offset. Input Timestamps that already reside on the offset will be adjusted depending on the type of offset and the parameter. Parameters ---------- first : pd.Timestamp The beginning Timestamp of the range to be adjusted. last : pd.Timestamp The ending Timestamp of the range to be adjusted. freq : pd.DateOffset The dateoffset to which the Timestamps will be adjusted. closed : {'right', 'left'}, default \"left\" Which side of bin interval is closed. origin : {'epoch', 'start', 'start_day'} or Timestamp, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a timestamp is not used, these values are also supported: - 'epoch': is 1970-01-01 - 'start': is the first value of the timeseries - 'start_day': is the first day at midnight of the timeseries offset : pd.Timedelta, default is None An offset timedelta added to the origin. 
Returns ------- A tuple of length 2, containing the adjusted pd.Timestamp objects.", + "type": "function", + "file_path": "pandas\\pandas\\core\\resample.py", + "ast_data": "FunctionDef name:_get_timestamp_range_edges arg:first arg:last arg:freq arg:unit arg:closed arg:origin arg:offset arguments arg arg arg arg arg arg arg If Call Assign If BoolOp Call Compare Compare Compare Raise Call If Compare Assign Call If Call Assign Call Assign Call If Call Assign Call Assign Call If Call Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_ldl_get_d_and_l", + "source_code": "def _ldl_get_d_and_l(ldu, pivs, lower=True, hermitian=True):\n is_c = iscomplexobj(ldu)\n d = diag(diag(ldu))\n n = d.shape[0]\n blk_i = 0\n x, y = (1, 0) if lower else (0, 1)\n lu = tril(ldu, -1) if lower else triu(ldu, 1)\n diag_inds = arange(n)\n lu[diag_inds, diag_inds] = 1\n for blk in pivs[pivs != 0]:\n inc = blk_i + blk\n if blk == 2:\n d[blk_i + x, blk_i + y] = ldu[blk_i + x, blk_i + y]\n if is_c and hermitian:\n d[blk_i + y, blk_i + x] = ldu[blk_i + x, blk_i + y].conj()\n else:\n d[blk_i + y, blk_i + x] = ldu[blk_i + x, blk_i + y]\n lu[blk_i + x, blk_i + y] = 0.0\n blk_i = inc\n return (d, lu)", + "docstring": "Helper function to extract the diagonal and triangular matrices for LDL.T factorization. Parameters ---------- ldu : ndarray The compact output returned by the LAPACK routing pivs : ndarray The sanitized array of {0, 1, 2} denoting the sizes of the pivots. For every 2 there is a succeeding 0. lower : bool, optional If set to False, upper triangular part is considered. hermitian : bool, optional If set to False a symmetric complex array is assumed. Returns ------- d : ndarray The block diagonal matrix. lu : ndarray The upper/lower triangular matrix", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_decomp_ldl.py", + "ast_data": "FunctionDef name:_ldl_get_d_and_l arg:ldu arg:pivs arg:lower arg:hermitian arguments arg arg arg arg Assign Call Assign Call Call Assign Assign Assign Assign Call Call Assign Call Assign For Compare Assign If Compare Assign If BoolOp Assign Call Assign Assign Assign Return return:yes" + }, + { + "library": "scipy", + "name": "_newton", + "source_code": "def _newton(n, x_initial, maxit=5):\n mu = sqrt(2.0 * n + 1.0)\n t = x_initial / mu\n theta = arccos(t)\n for i in range(maxit):\n u, ud = _pbcf(n, theta)\n dtheta = u / (sqrt(2.0) * mu * sin(theta) * ud)\n theta = theta + dtheta\n if max(abs(dtheta)) < 1e-14:\n break\n x = mu * cos(theta)\n if n % 2 == 1:\n x[0] = 0.0\n w = exp(-x ** 2) / (2.0 * ud ** 2)\n return (x, w)", + "docstring": "Newton iteration for polishing the asymptotic approximation to the zeros of the Hermite polynomials. Parameters ---------- n : int Quadrature order x_initial : ndarray Initial guesses for the roots maxit : int Maximal number of Newton iterations. The default 5 is sufficient, usually only one or two steps are needed. 
Returns ------- nodes : ndarray Quadrature nodes weights : ndarray Quadrature weights See Also -------- roots_hermite_asy", + "type": "function", + "file_path": "scipy\\scipy\\special\\_orthogonal.py", + "ast_data": "FunctionDef name:_newton arg:n arg:x_initial arg:maxit arguments arg arg arg Assign Call Assign Assign Call For Call Assign Call Assign Call Call Assign If Compare Call Call Assign Call If Compare Assign Assign Call Return return:yes" + }, + { + "library": "authlib", + "name": "authenticate_user", + "source_code": "def authenticate_user(self, subject):\n raise NotImplementedError()", + "docstring": "Authenticate user with the given assertion claims. Developers MUST implement it in subclass, e.g.:: def authenticate_user(self, subject): return User.get_by_sub(subject) :param subject: \"sub\" value in claims :return: User instance", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7523\\jwt_bearer.py", + "ast_data": "FunctionDef name:authenticate_user arg:self arg:subject arguments arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "import_event", + "source_code": "def import_event(tensor, name=None):\n return gen_summary_ops.import_event(_summary_state.writer._resource, tensor, name=name)", + "docstring": "Writes a binary proto. This can be used to import existing event logs into a new summary writer sink. Please note that this is lower level than the other summary functions and will ignore the setting. Args: tensor: A of type containing a serialized proto. name: A name for the operation (optional). Returns: The created .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:import_event arg:tensor arg:name arguments arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "copy", + "source_code": "def copy(self) -> Self:\n left = self._left.copy()\n right = self._right.copy()\n dtype = self.dtype\n return self._simple_new(left, right, dtype=dtype)", + "docstring": "Return a copy of the array. Returns ------- IntervalArray", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\interval.py", + "ast_data": "FunctionDef name:copy arg:self arguments arg Assign Call Assign Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_config_for_enable_caching_device", + "source_code": "def _config_for_enable_caching_device(rnn_cell):\n default_enable_caching_device = ops.executing_eagerly_outside_functions()\n if rnn_cell._enable_caching_device != default_enable_caching_device:\n return {'enable_caching_device': rnn_cell._enable_caching_device}\n return {}", + "docstring": "Return the dict config for RNN cell wrt to enable_caching_device field. Since enable_caching_device is a internal implementation detail for speed up the RNN variable read when running on the multi remote worker setting, we don't want this config to be serialized constantly in the JSON. We will only serialize this field when a none default value is used to create the cell. Args: rnn_cell: the RNN cell for serialize. 
Returns: A dict which contains the JSON config for enable_caching_device value or empty dict if the enable_caching_device value is same as the default value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py", + "ast_data": "FunctionDef name:_config_for_enable_caching_device arg:rnn_cell arguments arg Assign Call If Compare Return return:yes Return return:no" + }, + { + "library": "tensorflow", + "name": "validate_dense_weights", + "source_code": "def validate_dense_weights(values, weights, dtype=None):\n if weights is None:\n if dtype:\n return array_ops.constant([], dtype=dtype)\n return array_ops.constant([], dtype=values.dtype)\n if not isinstance(weights, tensor.Tensor):\n raise ValueError(f'Argument `weights` must be a tf.Tensor if `values` is a tf.Tensor. Received weights={weights} of type: {type(weights).__name__}')\n return weights", + "docstring": "Validates the passed weight tensor or creates an empty one.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\bincount_ops.py", + "ast_data": "FunctionDef name:validate_dense_weights arg:values arg:weights arg:dtype arguments arg arg arg If Compare If Return return:yes Call Return return:yes Call If Call Raise Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_path", + "source_code": "def get_path(self):\n return Path.unit_rectangle()", + "docstring": "Return the vertices of the rectangle.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:get_path arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "has_worker_context", + "source_code": "def has_worker_context():\n return dc_context.get_current_worker_context() is not None", + "docstring": "Returns whether a worker context has been entered.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py", + "ast_data": "FunctionDef name:has_worker_context arguments Return return:yes Compare Call" + }, + { + "library": "django", + "name": "get_select_mask", + "source_code": "def get_select_mask(self):\n field_names, defer = self.deferred_loading\n if not field_names:\n return {}\n mask = {}\n for field_name in field_names:\n part_mask = mask\n for part in field_name.split(LOOKUP_SEP):\n part_mask = part_mask.setdefault(part, {})\n opts = self.get_meta()\n if defer:\n return self._get_defer_select_mask(opts, mask)\n return self._get_only_select_mask(opts, mask)", + "docstring": "Convert the self.deferred_loading data structure to an alternate data structure, describing the field that *will* be loaded. This is used to compute the columns to select from the database and also by the QuerySet class to work out which fields are being initialized on each model. 
Models that have all their fields included aren't mentioned in the result, only those that have field restrictions in place.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\query.py", + "ast_data": "FunctionDef name:get_select_mask arg:self arguments arg Assign If Return return:no Assign For Assign For Call Assign Call Assign Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "mean", + "source_code": "def mean(self, m, n):\n M, m, n, _, _, mncond = self._process_parameters(m, n)\n if m.size != 0:\n M, n = (M[..., np.newaxis], n[..., np.newaxis])\n cond = M == 0\n M = np.ma.masked_array(M, mask=cond)\n mu = n * (m / M)\n if m.size != 0:\n mncond = mncond[..., np.newaxis] | np.zeros(mu.shape, dtype=np.bool_)\n return self._checkresult(mu, mncond, np.nan)", + "docstring": "Mean of the multivariate hypergeometric distribution. Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : array_like or scalar The mean of the distribution", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:mean arg:self arg:m arg:n arguments arg arg arg Assign Call If Compare Assign Assign Compare Assign Call Assign If Compare Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_normalize_field_name_to_tuple", + "source_code": "def _normalize_field_name_to_tuple(name: 'FieldName') -> Sequence[str]:\n if isinstance(name, str):\n return (name,)\n if isinstance(name, list):\n return tuple(name)\n assert isinstance(name, tuple)\n return name", + "docstring": "FieldName can be given also as string, this normalizes it to a tuple.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py", + "ast_data": "FunctionDef name:_normalize_field_name_to_tuple arg:name arguments arg If Call Return return:yes If Call Return return:yes Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_mouseover", + "source_code": "def set_mouseover(self, mouseover):\n self._mouseover = bool(mouseover)\n ax = self.axes\n if ax:\n if self._mouseover:\n ax._mouseover_set.add(self)\n else:\n ax._mouseover_set.discard(self)", + "docstring": "Set whether this artist is queried for custom context information when the mouse cursor moves over it. Parameters ---------- mouseover : bool See Also -------- get_cursor_data .ToolCursorPosition .NavigationToolbar2", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:set_mouseover arg:self arg:mouseover arguments arg arg Assign Call Assign If If Call Call" + }, + { + "library": "django", + "name": "is_django_module", + "source_code": "def is_django_module(module):\n return module.__name__.startswith('django.')", + "docstring": "Return True if the given module is nested under Django.", + "type": "function", + "file_path": "django\\django\\utils\\autoreload.py", + "ast_data": "FunctionDef name:is_django_module arg:module arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "WriteGraphOpCreation", + "source_code": "def WriteGraphOpCreation(self, graph_op_creation):\n debug_event = debug_event_pb2.DebugEvent(graph_op_creation=graph_op_creation)\n self._EnsureTimestampAdded(debug_event)\n _pywrap_debug_events_writer.WriteGraphOpCreation(self._dump_root, debug_event)", + "docstring": "Write a GraphOpCreation proto with the writer. 
Args: graph_op_creation: A GraphOpCreation proto, describing the details of the creation of an op inside a TensorFlow Graph.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_writer.py", + "ast_data": "FunctionDef name:WriteGraphOpCreation arg:self arg:graph_op_creation arguments arg arg Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "deferred_exits", + "source_code": "@property\ndef deferred_exits(self):\n return self._deferred_exits", + "docstring": "The list of \"deferred\" exits.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py", + "ast_data": "FunctionDef name:deferred_exits arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "wrap_py_func", + "source_code": "def wrap_py_func(f, args, kwargs=None):\n tensor_args = []\n tensor_args_idx = {}\n n_args = len(args)\n arg_is_tensor = tuple(map(tensor_util.is_tf_type, args))\n for i in range(n_args):\n if arg_is_tensor[i]:\n tensor_args_idx[i] = len(tensor_args)\n tensor_args.append(args[i])\n if kwargs:\n kwarg_keys = tuple(kwargs.keys())\n kwarg_is_tensor = {k: tensor_util.is_tf_type(kwargs[k]) for k in kwarg_keys}\n for k in kwarg_keys:\n if kwarg_is_tensor[k]:\n tensor_args_idx[k] = len(tensor_args)\n tensor_args.append(kwargs[k])\n else:\n kwarg_keys = ()\n\n def f_wrapper(*tensor_args):\n f_args = tuple((tensor_args[tensor_args_idx[i]] if arg_is_tensor[i] else a for i, a in enumerate(args)))\n f_kwargs = {k: tensor_args[tensor_args_idx[k]] if kwarg_is_tensor[k] else kwargs[k] for i, k in enumerate(kwarg_keys)}\n f(*f_args, **f_kwargs)\n return 1\n return script_ops.eager_py_func(f_wrapper, tensor_args, dtypes.int32)", + "docstring": "Helper that wraps a callable to py_func. The helper passes tensor arguments through the py_func interface. Non-tensor arguments are allowed, and will be passed to f directly. Note that non-tensor arguments are captured by f will not update every time the wrapper is called (this is consistent with its argument list, which only includes the tensor arguments). In general, it's safest not to reuse this wrapper. Args: f: Callable args: Positional arguments for f, as list or tuple. kwargs: Keyword arguments for f, as dict with string keys. May be None. Returns: The return values of f converted to tensor. Raises: ValueError: if any of the arguments are incorrect.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\autograph_ops.py", + "ast_data": "FunctionDef name:wrap_py_func arg:f arg:args arg:kwargs arguments arg arg arg Assign Assign Assign Call Assign Call Call For Call If Assign Call Call If Assign Call Call Assign Call For If Assign Call Call Assign FunctionDef name:f_wrapper arguments arg Assign Call Call Assign Call Call Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "stringfilter", + "source_code": "def stringfilter(func):\n\n @wraps(func)\n def _dec(first, *args, **kwargs):\n first = str(first)\n result = func(first, *args, **kwargs)\n if isinstance(first, SafeData) and getattr(unwrap(func), 'is_safe', False):\n result = mark_safe(result)\n return result\n return _dec", + "docstring": "Decorator for filters which should only receive strings. 
The object passed as the first positional argument will be converted to a string.", + "type": "function", + "file_path": "django\\django\\template\\defaultfilters.py", + "ast_data": "FunctionDef name:stringfilter arg:func arguments arg FunctionDef name:_dec arg:first arguments arg arg arg Assign Call Assign Call If BoolOp Call Call Call Assign Call Return return:yes Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "__call__", + "source_code": "def __call__(self, posA, posB, shrinkA=2.0, shrinkB=2.0, patchA=None, patchB=None):\n path = self.connect(posA, posB)\n path = self._clip(path, self._in_patch(patchA) if patchA else None, self._in_patch(patchB) if patchB else None)\n path = self._clip(path, inside_circle(*path.vertices[0], shrinkA) if shrinkA else None, inside_circle(*path.vertices[-1], shrinkB) if shrinkB else None)\n return path", + "docstring": "Call the *connect* method to create a path between *posA* and *posB*; then clip and shrink the path.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:posA arg:posB arg:shrinkA arg:shrinkB arg:patchA arg:patchB arguments arg arg arg arg arg arg arg Assign Call Assign Call Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "output_classes", + "source_code": "@property\n@deprecation.deprecated(None, 'Use `tf.compat.v1.data.get_output_classes(iterator)`.')\ndef output_classes(self):\n return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), self._element_spec)", + "docstring": "Returns the class of each component of an element of this iterator. The expected values are and . Returns: A (nested) structure of Python objects corresponding to each component of an element of this dataset.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py", + "ast_data": "FunctionDef name:output_classes arg:self arguments arg Return return:yes Call arguments arg Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, parent, isolated=True, function_name=None):\n self.parent = parent\n self.isolated = isolated\n self.function_name = function_name\n self.isolated_names = set()\n self.read = set()\n self.modified = set()\n self.deleted = set()\n self.bound = set()\n self.globals = set()\n self.nonlocals = set()\n self.annotations = set()\n self.params = weakref.WeakValueDictionary()\n self.is_final = False", + "docstring": "Create a new scope. Args: parent: A Scope or None. isolated: Whether the scope is isolated, that is, whether variables modified in this scope should be considered modified in the parent scope. 
function_name: Name of the function owning this scope.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\activity.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:parent arg:isolated arg:function_name arguments arg arg arg arg Assign Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign" + }, + { + "library": "pytorch", + "name": "build_metadata", + "source_code": "@abstractmethod\ndef build_metadata(self, tensor_sizes: torch.Size, tensor_properties: sharded_tensor_meta.TensorProperties) -> sharded_tensor_meta.ShardedTensorMetadata:\n pass", + "docstring": "Given a global tensor size, define how to shard a tensor like this shape across ranks, return ShardedTensorMetadata Args: tensor_sizes (:class:): The tensor shape to shard on, a object that represents the tensor shape to be sharded according to the ShardingSpec. tensor_properties(:class:ShardedTensorMetadata` object that encodes the information about the layout of the ShardedTensor and its properties.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\api.py", + "ast_data": "FunctionDef name:build_metadata arg:self arg:tensor_sizes arg:tensor_properties arguments arg arg arg" + }, + { + "library": "tensorflow", + "name": "on_train_batch_end", + "source_code": "@doc_controls.for_subclass_implementers\n@generic_utils.default\ndef on_train_batch_end(self, batch, logs=None):\n self.on_batch_end(batch, logs=logs)", + "docstring": "Called at the end of a training batch in methods. Subclasses should override for any actions to run. Note that if the argument to in is set to , this method will only be called every batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:on_train_batch_end arg:self arg:batch arg:logs arguments arg arg arg Call" + }, + { + "library": "pandas", + "name": "convert_object_array", + "source_code": "def convert_object_array(content: list[npt.NDArray[np.object_]], dtype: DtypeObj | None, dtype_backend: str='numpy', coerce_float: bool=False) -> list[ArrayLike]:\n\n def convert(arr):\n if dtype != np.dtype('O'):\n arr = lib.maybe_convert_objects(arr, try_float=coerce_float, convert_to_nullable_dtype=dtype_backend != 'numpy')\n if dtype is None:\n if arr.dtype == np.dtype('O'):\n convert_to_nullable_dtype = dtype_backend != 'numpy'\n arr = maybe_infer_to_datetimelike(arr, convert_to_nullable_dtype)\n if convert_to_nullable_dtype and arr.dtype == np.dtype('O'):\n new_dtype = StringDtype()\n arr_cls = new_dtype.construct_array_type()\n arr = arr_cls._from_sequence(arr, dtype=new_dtype)\n elif dtype_backend != 'numpy' and isinstance(arr, np.ndarray):\n if arr.dtype.kind in 'iufb':\n arr = pd_array(arr, copy=False)\n elif isinstance(dtype, ExtensionDtype):\n cls = dtype.construct_array_type()\n arr = cls._from_sequence(arr, dtype=dtype, copy=False)\n elif dtype.kind in 'mM':\n arr = maybe_cast_to_datetime(arr, dtype)\n return arr\n arrays = [convert(arr) for arr in content]\n return arrays", + "docstring": "Internal function to convert object array. Parameters ---------- content: List[np.ndarray] dtype: np.dtype or ExtensionDtype dtype_backend: Controls if nullable/pyarrow dtypes are returned. coerce_float: Cast floats that are integers to int. 
Returns ------- List[ArrayLike]", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\construction.py", + "ast_data": "FunctionDef name:convert_object_array arg:content arg:dtype arg:dtype_backend arg:coerce_float arguments arg arg arg arg FunctionDef name:convert arg:arr arguments arg If Compare Call Assign Call Compare If Compare If Compare Call Assign Compare Assign Call If BoolOp Compare Call Assign Call Assign Call Assign Call If BoolOp Compare Call If Compare Assign Call If Call Assign Call Assign Call If Compare Assign Call Return return:yes Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "after_run", + "source_code": "def after_run(self, run_context, run_values):\n pass", + "docstring": "Called after each call to run(). The argument contains results of requested ops/tensors by . The argument is the same one send to call. can be called to stop the iteration. If raises any exceptions then is not called. Args: run_context: A object. run_values: A SessionRunValues object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py", + "ast_data": "FunctionDef name:after_run arg:self arg:run_context arg:run_values arguments arg arg arg" + }, + { + "library": "scipy", + "name": "Matyas", + "source_code": "class Matyas(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n self.global_optimum = [[0 for _ in range(self.N)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n return 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1]", + "docstring": "Matyas objective function. This class defines the Matyas [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Matyas}}(x) = 0.26(x_1^2 + x_2^2) - 0.48 x_1 x_2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py", + "ast_data": "ClassDef name:Matyas FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes" + }, + { + "library": "django", + "name": "aflush", + "source_code": "async def aflush(self):\n self.clear()\n await self.adelete(self.session_key)\n self._session_key = None", + "docstring": "See flush().", + "type": "method", + "file_path": "django\\django\\contrib\\sessions\\backends\\cached_db.py", + "ast_data": "AsyncFunctionDef name:aflush arg:self arguments arg Call Call Assign" + }, + { + "library": "tensorflow", + "name": "_get_or_make_slot", + "source_code": "def _get_or_make_slot(self, var, val, slot_name, op_name):\n named_slots = self._slot_dict(slot_name)\n if _var_key(var) not in named_slots:\n new_slot_variable = slot_creator.create_slot(var, val, op_name, copy_xla_sharding=True)\n self._restore_slot_variable(slot_name=slot_name, variable=var, slot_variable=new_slot_variable)\n named_slots[_var_key(var)] = new_slot_variable\n return named_slots[_var_key(var)]", + "docstring": "Find or create a slot for a variable. Args: var: A object. val: A . The initial value of the slot. slot_name: Name for the slot. 
op_name: Name to use when scoping the Variable that needs to be created for the slot. Returns: A object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_get_or_make_slot arg:self arg:var arg:val arg:slot_name arg:op_name arguments arg arg arg arg arg Assign Call If Compare Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "open", + "source_code": "def open(self, name, mode='rb'):\n return self._open(name, mode)", + "docstring": "Retrieve the specified file from storage.", + "type": "method", + "file_path": "django\\django\\core\\files\\storage\\base.py", + "ast_data": "FunctionDef name:open arg:self arg:name arg:mode arguments arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "__init__", + "source_code": "@docfiller\ndef __init__(self, mat_stream, byte_order=None, mat_dtype=False, squeeze_me=False, chars_as_strings=True, matlab_compatible=False, struct_as_record=True, verify_compressed_data_integrity=True, simplify_cells=False):\n self.mat_stream = mat_stream\n self.dtypes = {}\n if not byte_order:\n byte_order = self.guess_byte_order()\n else:\n byte_order = boc.to_numpy_code(byte_order)\n self.byte_order = byte_order\n self.struct_as_record = struct_as_record\n if matlab_compatible:\n self.set_matlab_compatible()\n else:\n self.squeeze_me = squeeze_me\n self.chars_as_strings = chars_as_strings\n self.mat_dtype = mat_dtype\n self.verify_compressed_data_integrity = verify_compressed_data_integrity\n self.simplify_cells = simplify_cells\n if simplify_cells:\n self.squeeze_me = True\n self.struct_as_record = False", + "docstring": "Initializer for mat file reader mat_stream : file-like object with file API, open for reading %(load_args)s", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:mat_stream arg:byte_order arg:mat_dtype arg:squeeze_me arg:chars_as_strings arg:matlab_compatible arg:struct_as_record arg:verify_compressed_data_integrity arg:simplify_cells arguments arg arg arg arg arg arg arg arg arg arg Assign Assign If Assign Call Assign Call Assign Assign If Call Assign Assign Assign Assign Assign If Assign Assign" + }, + { + "library": "numpy", + "name": "hermvander", + "source_code": "def hermvander(x, deg):\n ideg = pu._as_int(deg, 'deg')\n if ideg < 0:\n raise ValueError('deg must be non-negative')\n x = np.array(x, copy=None, ndmin=1) + 0.0\n dims = (ideg + 1,) + x.shape\n dtyp = x.dtype\n v = np.empty(dims, dtype=dtyp)\n v[0] = x * 0 + 1\n if ideg > 0:\n x2 = x * 2\n v[1] = x2\n for i in range(2, ideg + 1):\n v[i] = v[i - 1] * x2 - v[i - 2] * (2 * (i - 1))\n return np.moveaxis(v, 0, -1)", + "docstring": "Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree and sample points . The pseudo-Vandermonde matrix is defined by .. 
math:: V[..., i] = H_i(x), where ``0 >> import numpy as np >>> from numpy.polynomial.hermite import hermvander >>> x = np.array([-1, 0, 1]) >>> hermvander(x, 3) array([[ 1., -2., 2., 4.], [ 1., 0., -2., -0.], [ 1., 2., 2., -4.]])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite.py", + "ast_data": "FunctionDef name:hermvander arg:x arg:deg arguments arg arg Assign Call If Compare Raise Call Assign Call Assign Assign Assign Call Assign If Compare Assign Assign For Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_maximum_flops", + "source_code": "@ops.RegisterStatistics('Maximum', 'flops')\ndef _maximum_flops(graph, node):\n return _binary_per_element_op_flops(graph, node)", + "docstring": "Compute flops for Maximum operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py", + "ast_data": "FunctionDef name:_maximum_flops arg:graph arg:node arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_get_outer_context_and_inner_device_stack", + "source_code": "def _get_outer_context_and_inner_device_stack() -> tuple[Callable[[], ContextManager[Graph]], traceable_stack.TraceableStack]:\n default_graph = get_default_graph()\n outer_context = None\n innermost_nonempty_device_stack = default_graph._device_function_stack\n if not _default_graph_stack.stack:\n if default_graph.building_function:\n raise RuntimeError('The global graph is building a function.')\n outer_context = default_graph.as_default\n else:\n for stack_entry in reversed(context.context().context_switches.stack):\n if not innermost_nonempty_device_stack:\n innermost_nonempty_device_stack = stack_entry.device_stack\n if not stack_entry.is_building_function:\n outer_context = stack_entry.enter_context_fn\n break\n if outer_context is None:\n outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default\n if outer_context is None:\n raise RuntimeError('All graphs are building functions, and no eager context was previously active.')\n return (outer_context, innermost_nonempty_device_stack)", + "docstring": "Get the outermost context not building a function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_get_outer_context_and_inner_device_stack arguments Assign Call Assign Assign If If Raise Call Assign For Call Call If Assign If Assign If Compare Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_create_pseudo_names", + "source_code": "def _create_pseudo_names(tensors, prefix):\n\n def one_index(ele):\n if isinstance(ele, int):\n return ele + 1\n return ele\n flat_paths = list(nest.yield_flat_paths(tensors))\n flat_paths = nest.map_structure(one_index, flat_paths)\n names = []\n for path in flat_paths:\n if not path:\n name = prefix + '1'\n else:\n name = '_'.join((str(p) for p in path))\n if isinstance(path[0], int):\n name = prefix + name\n names.append(name)\n return names", + "docstring": "Creates pseudo {input | output} names for subclassed Models. Warning: this function should only be used to define default names for and . No other use cases should rely on a 's input or output names. Example with dict: becomes: Example with list: becomes: Args: tensors: 's outputs or inputs. prefix: 'output_' for outputs, 'input_' for inputs. 
Returns: Flattened list of pseudo names.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_keras_util.py", + "ast_data": "FunctionDef name:_create_pseudo_names arg:tensors arg:prefix arguments arg arg FunctionDef name:one_index arg:ele arguments arg If Call Return return:yes Return return:yes Assign Call Call Assign Call Assign For If Assign Assign Call Call If Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "harden_mask", + "source_code": "def harden_mask(self):\n self._hardmask = True", + "docstring": "Forces the mask to hard.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\mrecords.py", + "ast_data": "FunctionDef name:harden_mask arg:self arguments arg Assign" + }, + { + "library": "scipy", + "name": "gaussian_gradient_magnitude", + "source_code": "@_ni_docstrings.docfiller\ndef gaussian_gradient_magnitude(input, sigma, output=None, mode='reflect', cval=0.0, *, axes=None, **kwargs):\n input = np.asarray(input)\n\n def derivative(input, axis, output, mode, cval, sigma, **kwargs):\n order = [0] * input.ndim\n order[axis] = 1\n return gaussian_filter(input, sigma, order, output, mode, cval, **kwargs)\n return generic_gradient_magnitude(input, derivative, output, mode, cval, extra_arguments=(sigma,), extra_keywords=kwargs, axes=axes)", + "docstring": "Multidimensional gradient magnitude using Gaussian derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. %(output)s %(mode_multiple)s %(cval)s axes : tuple of int or None The axes over which to apply the filter. If or tuples are provided, their length must match the number of axes. Extra keyword arguments will be passed to gaussian_filter(). Returns ------- gaussian_gradient_magnitude : ndarray Filtered array. Has the same shape as . Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_filters.py", + "ast_data": "FunctionDef name:gaussian_gradient_magnitude arg:input arg:sigma arg:output arg:mode arg:cval arguments arg arg arg arg arg arg arg Assign Call FunctionDef name:derivative arg:input arg:axis arg:output arg:mode arg:cval arg:sigma arguments arg arg arg arg arg arg arg Assign Assign Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_prefix_to_checkpoint_path", + "source_code": "def _prefix_to_checkpoint_path(prefix, format_version):\n if format_version == saver_pb2.SaverDef.V2:\n return prefix + '.index'\n return prefix", + "docstring": "Returns the pathname of a checkpoint file, given the checkpoint prefix. For V1 checkpoint, simply returns the prefix itself (the data file). For V2, returns the pathname to the index file. Args: prefix: a string, the prefix of a checkpoint. format_version: the checkpoint format version that corresponds to the prefix. 
Returns: The pathname of a checkpoint file, taking into account the checkpoint format version.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py", + "ast_data": "FunctionDef name:_prefix_to_checkpoint_path arg:prefix arg:format_version arguments arg arg If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_TensorListScatterIntoExistingListGrad", + "source_code": "@ops.RegisterGradient('TensorListScatterIntoExistingList')\ndef _TensorListScatterIntoExistingListGrad(op: ops.Operation, dlist):\n _, tensor, indices = op.inputs\n dtensor = gen_list_ops.tensor_list_gather(dlist, indices, element_shape=array_ops.slice(array_ops.shape(tensor), [1], [-1]), element_dtype=tensor.dtype)\n zeros = array_ops.zeros_like(tensor)\n dlist = tensor_list_scatter(zeros, indices, indices, input_handle=dlist)\n return (dlist, dtensor, None)", + "docstring": "Gradient function for TensorListScatterIntoExistingList.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\list_ops.py", + "ast_data": "FunctionDef name:_TensorListScatterIntoExistingListGrad arg:op arg:dlist arguments arg arg Assign Assign Call Call Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_shapes", + "source_code": "def _shapes(tensor_list_list, shapes, enqueue_many):\n if shapes is None:\n len0 = len(tensor_list_list[0])\n for tl in tensor_list_list:\n for i in range(len0):\n if tl[i].shape.ndims is None:\n raise ValueError(\"Cannot infer Tensor's rank: %s\" % tl[i])\n shapes = [_merge_shapes([tl[i].shape.as_list() for tl in tensor_list_list], enqueue_many) for i in range(len0)]\n return shapes", + "docstring": "Calculate and merge the shapes of incoming tensors. Args: tensor_list_list: List of tensor lists. shapes: List of shape tuples corresponding to tensors within the lists. enqueue_many: Boolean describing whether shapes will be enqueued as batches or individual entries. Returns: A list of shapes aggregating shape inference info from , or returning if it is not . 
Raises: ValueError: If any of the inferred shapes in lack a well defined rank.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\input.py", + "ast_data": "FunctionDef name:_shapes arg:tensor_list_list arg:shapes arg:enqueue_many arguments arg arg arg If Compare Assign Call For For Call If Compare Raise Call Assign Call Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "parse", + "source_code": "def parse(self) -> None:\n self.parse_comments()\n self.parse_definition()", + "docstring": "Parse the source code.", + "type": "method", + "file_path": "sphinx\\sphinx\\pycode\\parser.py", + "ast_data": "FunctionDef name:parse arg:self arguments arg Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, dataset, coordinator):\n if isinstance(dataset, input_lib.DistributedDataset):\n original_dataset = dataset._original_dataset\n serialized = serialize_dataset_to_graph(original_dataset)\n\n def dataset_fn():\n deserialized = deserialize_dataset_from_graph(serialized, original_dataset.element_spec)\n dataset.build(dataset_to_replace=deserialized)\n return dataset\n elif isinstance(dataset, input_lib.DistributedDatasetsFromFunction):\n\n def dataset_fn():\n dataset.build()\n return dataset\n elif isinstance(dataset, dataset_ops.Dataset):\n serialized = serialize_dataset_to_graph(dataset)\n\n def dataset_fn():\n return deserialize_dataset_from_graph(serialized, dataset.element_spec)\n else:\n raise ValueError('Unexpected dataset type!')\n super(PerWorkerDatasetFromDataset, self).__init__(dataset_fn, coordinator)", + "docstring": "Makes an iterable from datasets created by the given dataset. It creates a dataset_fn which deserializes a dataset from a graph under the hood. 
Args: dataset: A tf.data.Dataset, a DistributedDataset or a DistributedDatasetsFromFunction coordinator: a object, used to create dataset resources.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:dataset arg:coordinator arguments arg arg arg If Call Assign Assign Call FunctionDef name:dataset_fn arguments Assign Call Call Return return:yes If Call FunctionDef name:dataset_fn arguments Call Return return:yes If Call Assign Call FunctionDef name:dataset_fn arguments Return return:yes Call Raise Call Call Call" + }, + { + "library": "tensorflow", + "name": "tensor_data", + "source_code": "@property\ndef tensor_data(self):\n return self._tensor_data", + "docstring": "A map from tensor name to its converted _TensorData.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", + "ast_data": "FunctionDef name:tensor_data arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_LazyProtocol", + "source_code": "class _LazyProtocol(Protocol):\n\n def _register_load_state_dict_pre_hook(self, hook):\n ...\n\n def register_forward_pre_hook(self, hook, *, prepend=False, with_kwargs=False):\n ...\n\n def _lazy_load_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n ...\n\n def _get_name(self):\n ...\n\n def _infer_parameters(self, module, input):\n ...\n\n @property\n def _parameters(self):\n ...\n\n @property\n def _buffers(self):\n ...\n\n @property\n def _non_persistent_buffers_set(self):\n ...\n\n @property\n def _load_hook(self):\n ...\n\n @property\n def _initialize_hook(self):\n ...", + "docstring": "This class is used to avoid errors with mypy checks for the attributes in a mixin.", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\lazy.py", + "ast_data": "ClassDef name:_LazyProtocol FunctionDef name:_register_load_state_dict_pre_hook arg:self arg:hook arguments arg arg FunctionDef name:register_forward_pre_hook arg:self arg:hook arguments arg arg arg arg FunctionDef name:_lazy_load_hook arg:self arg:state_dict arg:prefix arg:local_metadata arg:strict arg:missing_keys arg:unexpected_keys arg:error_msgs arguments arg arg arg arg arg arg arg arg FunctionDef name:_get_name arg:self arguments arg FunctionDef name:_infer_parameters arg:self arg:module arg:input arguments arg arg arg FunctionDef name:_parameters arg:self arguments arg FunctionDef name:_buffers arg:self arguments arg FunctionDef name:_non_persistent_buffers_set arg:self arguments arg FunctionDef name:_load_hook arg:self arguments arg FunctionDef name:_initialize_hook arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "_pad_strides", + "source_code": "@staticmethod\ndef _pad_strides(in_strides, size, dtype):\n align = get_align_for_dtype(dtype)\n if len(in_strides) == 0:\n return in_strides\n if not config.pad_channels_last and Layout.is_channels_last_contiguous(size, in_strides):\n return in_strides\n current_fx_node = V.get_current_node()\n if hasattr(current_fx_node, 'meta') and current_fx_node.meta.get('dislike_padding', False):\n return in_strides\n if not all((isinstance(s, (int, sympy.Integer)) for s in itertools.chain(in_strides, size))):\n return in_strides\n stride_order = get_stride_order(in_strides)\n fill_order = stride_order2fill_order(stride_order)\n new_strides = [0 for _ in range(len(in_strides))]\n new_strides[fill_order[0]] = 1\n padded = False\n for 
rank, idx in enumerate(fill_order[1:], start=1):\n prev_idx = fill_order[rank - 1]\n stride = new_strides[prev_idx] * size[prev_idx]\n if stride > config.padding_stride_threshold and stride % align != 0:\n stride = ceildiv(stride, align) * align\n padded = True\n new_strides[idx] = stride\n if not padded:\n return in_strides\n metrics.num_comprehensive_padding += 1\n return new_strides", + "docstring": "The padding does not change stride order but makes sure all strides larger than the threshold are multiple of align.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "FunctionDef name:_pad_strides arg:in_strides arg:size arg:dtype arguments arg arg arg Assign Call If Compare Call Return return:yes If BoolOp Call Return return:yes Assign Call If BoolOp Call Call Return return:yes If Call Call Call Return return:yes Assign Call Assign Call Assign Call Call Assign Assign For Call Assign Assign If BoolOp Compare Compare Assign Call Assign Assign If Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "try_", + "source_code": "@staticmethod\ndef try_(method_fn, *args, **kwargs):\n if not chromium_event_log_active():\n return\n metrics_context = get_metrics_context()\n if not metrics_context.in_progress():\n return\n method_fn(*args, **kwargs)", + "docstring": "Special function that quietly runs a given method, returning if CHROMIUM_EVENT_LOG is None or metrics context is not set", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:try_ arg:method_fn arguments arg arg arg If Call Return return:no Assign Call If Call Return return:no Call" + }, + { + "library": "numpy", + "name": "setdiff1d", + "source_code": "def setdiff1d(ar1, ar2, assume_unique=False):\n if assume_unique:\n ar1 = ma.asarray(ar1).ravel()\n else:\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]", + "docstring": "Set difference of 1D arrays with unique elements. The output is always a masked array. See for more details. See Also -------- numpy.setdiff1d : Equivalent function for ndarrays. 
Examples -------- >>> import numpy as np >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) >>> np.ma.setdiff1d(x, [1, 2]) masked_array(data=[3, --], mask=[False, True], fill_value=999999)", + "type": "function", + "file_path": "numpy\\numpy\\ma\\extras.py", + "ast_data": "FunctionDef name:setdiff1d arg:ar1 arg:ar2 arg:assume_unique arguments arg arg arg If Assign Call Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "tick_bottom", + "source_code": "def tick_bottom(self):\n label = True\n if 'label1On' in self._major_tick_kw:\n label = self._major_tick_kw['label1On'] or self._major_tick_kw['label2On']\n self.set_ticks_position('bottom')\n self.set_tick_params(which='both', labelbottom=label)", + "docstring": "Move ticks and ticklabels (if present) to the bottom of the Axes.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:tick_bottom arg:self arguments arg Assign If Compare Assign BoolOp Call Call" + }, + { + "library": "django", + "name": "render_to_string", + "source_code": "def render_to_string(self, template_name, context=None):\n if isinstance(template_name, (list, tuple)):\n t = self.select_template(template_name)\n else:\n t = self.get_template(template_name)\n if isinstance(context, Context):\n return t.render(context)\n else:\n return t.render(Context(context, autoescape=self.autoescape))", + "docstring": "Render the template specified by template_name with the given context. For use in Django's test suite.", + "type": "method", + "file_path": "django\\django\\template\\engine.py", + "ast_data": "FunctionDef name:render_to_string arg:self arg:template_name arg:context arguments arg arg arg If Call Assign Call Assign Call If Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "reload_cubin_path", + "source_code": "def reload_cubin_path(self):\n cubin_location = os.path.join(triton_cache_dir(self.compile_meta.get('device', 0)), triton_hash_to_path_key(self.kernel.hash), f'{self.kernel.name}.cubin')\n if not os.path.exists(cubin_location):\n if self.kernel.cubin_raw is not None:\n self.kernel.reload_cubin_from_raw(cubin_location)\n else:\n raise RuntimeError('Cubin file saved by TritonBundler not found at %s', cubin_location)\n self.kernel.cubin_path = cubin_location", + "docstring": "When loading from cache on disk, we want to reload cubin files from their appropriate location on disc.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py", + "ast_data": "FunctionDef name:reload_cubin_path arg:self arguments arg Assign Call Call Call Call If Call If Compare Call Raise Call Assign" + }, + { + "library": "pytorch", + "name": "Parameter", + "source_code": "@dataclasses.dataclass(frozen=True)\nclass Parameter:\n name: str\n type_constraint: TypeConstraintParam\n required: bool\n variadic: bool\n default: Any = _EMPTY_DEFAULT\n\n def __str__(self) -> str:\n type_str = self.type_constraint.name\n if self.has_default():\n return f'{self.name}: {type_str} = {self.default}'\n return f'{self.name}: {type_str}'\n\n def has_default(self) -> bool:\n return self.default is not _EMPTY_DEFAULT", + "docstring": "A formal parameter of an operator.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_schemas.py", + "ast_data": "ClassDef name:Parameter FunctionDef name:__str__ arg:self arguments arg Assign If Call Return return:yes Return return:yes FunctionDef name:has_default 
arg:self arguments arg Return return:yes Compare Call" + }, + { + "library": "pandas", + "name": "add_memory_usage_line", + "source_code": "def add_memory_usage_line(self) -> None:\n self._lines.append(f'memory usage: {self.memory_usage_string}')", + "docstring": "Add line containing memory usage.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:add_memory_usage_line arg:self arguments arg Call" + }, + { + "library": "tensorflow", + "name": "_add_jump_node", + "source_code": "def _add_jump_node(self, ast_node, guards):\n node = self._add_new_node(ast_node)\n self.leaves = set()\n self.finally_sections[node] = guards\n return node", + "docstring": "Grows the graph by adding a jump node. Jump nodes are added to the current leaf set, and the leaf set becomes empty. If the jump node is the last in a cond section, then it may be added back to the leaf set by a separate mechanism. Args: ast_node: ast.AST guards: Tuple[ast.AST, ...], the finally sections active for this node Returns: Node", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py", + "ast_data": "FunctionDef name:_add_jump_node arg:self arg:ast_node arg:guards arguments arg arg arg Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "generate_numba_transform_func", + "source_code": "@functools.cache\ndef generate_numba_transform_func(func: Callable[..., np.ndarray], nopython: bool, nogil: bool, parallel: bool) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]:\n numba_func = jit_user_function(func)\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency('numba')\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def group_transform(values: np.ndarray, index: np.ndarray, begin: np.ndarray, end: np.ndarray, num_columns: int, *args: Any) -> np.ndarray:\n assert len(begin) == len(end)\n num_groups = len(begin)\n result = np.empty((len(values), num_columns))\n for i in numba.prange(num_groups):\n group_index = index[begin[i]:end[i]]\n for j in numba.prange(num_columns):\n group = values[begin[i]:end[i], j]\n result[begin[i]:end[i], j] = numba_func(group, group_index, *args)\n return result\n return group_transform", + "docstring": "Generate a numba jitted transform function specified by values from engine_kwargs. 1. jit the user's function 2. Return a groupby transform function with the jitted function inline Configurations specified in engine_kwargs apply to both the user's function _AND_ the groupby evaluation loop. 
Parameters ---------- func : function function to be applied to each window and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function", + "type": "function", + "file_path": "pandas\\pandas\\core\\groupby\\numba_.py", + "ast_data": "FunctionDef name:generate_numba_transform_func arg:func arg:nopython arg:nogil arg:parallel arguments arg arg arg arg Assign Call If Assign Call FunctionDef name:group_transform arg:values arg:index arg:begin arg:end arg:num_columns arguments arg arg arg arg arg arg Compare Call Call Assign Call Assign Call Call For Call Assign For Call Assign Assign Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "LaplaceWithSoftplusScale", + "source_code": "class LaplaceWithSoftplusScale(Laplace):\n\n @deprecation.deprecated('2019-01-01', 'Use `tfd.Laplace(loc, tf.nn.softplus(scale)) instead.', warn_once=True)\n def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name='LaplaceWithSoftplusScale'):\n parameters = dict(locals())\n with ops.name_scope(name, values=[loc, scale]) as name:\n super(LaplaceWithSoftplusScale, self).__init__(loc=loc, scale=nn.softplus(scale, name='softplus_scale'), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=name)\n self._parameters = parameters", + "docstring": "Laplace with softplus applied to .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\laplace.py", + "ast_data": "ClassDef name:LaplaceWithSoftplusScale FunctionDef name:__init__ arg:self arg:loc arg:scale arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg Assign Call Call With Call Call Call Call Assign Call" + }, + { + "library": "pytorch", + "name": "from_float", + "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n return _ConvNd.from_float(cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant)", + "docstring": "Creates a quantized module from a float module or qparams_dict. Args: mod (Module): a float module, either produced by torch.ao.quantization utilities or provided by the user", + "type": "method", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\conv.py", + "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "ModelAttributes", + "source_code": "class ModelAttributes(SerializedAttributes.with_attributes('ModelAttributes', copy_from=[LayerAttributes])):\n pass", + "docstring": "Model checkpointable objects + functions that are saved to the SavedModel. 
List of all attributes: All attributes from LayerAttributes (including CommonEndpoints)", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py", + "ast_data": "ClassDef name:ModelAttributes Call" + }, + { + "library": "pytorch", + "name": "_get_state_dict_2d_layout", + "source_code": "def _get_state_dict_2d_layout(state_dict: STATE_DICT_TYPE) -> tuple[STATE_DICT_2D_LAYOUT, Optional[dist.ProcessGroup]]:\n specs: STATE_DICT_2D_LAYOUT = {}\n dp_pg: Optional[dist.ProcessGroup] = None\n for key, value in state_dict.items():\n specs[key] = (None, value.size())\n if _is_nested_tensor(value):\n assert len(value.local_shards()) == 1, 'Cannot handle ST with multiple shards'\n assert isinstance(value, ShardedTensor), 'Can only handle nested ShardedTensor'\n shard = value.local_shards()[0]\n specs[key] = (shard.metadata.shard_offsets, shard.metadata.shard_sizes)\n dp_pg = shard.tensor._process_group\n return (specs, dp_pg)", + "docstring": "Load the right TP slice of the optimizer state. This is not easy since the per-tensor slicing can't be inferred from checkpoint metadata. We take advantage of the model state_dict producing a sliced ST to figure out what we need to load. This is pretty fragile and it might be easier for FSDP to compute this info for us. Returns a dictionary where keys are the same of the state_dict and the value is a tuple of (offset, size) for the current rank TP slice. N.B. The state_dict *MUST* come from FSDP.sharded_state_dict.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\optimizer.py", + "ast_data": "FunctionDef name:_get_state_dict_2d_layout arg:state_dict arguments arg For Call Assign Call If Call Compare Call Call Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "unselect", + "source_code": "def unselect(self):\n if sys.platform == 'win32':\n self.dc.SelectObject(wx.NullBitmap)\n self.IsSelected = False", + "docstring": "Select a Null bitmap into this wxDC instance.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py", + "ast_data": "FunctionDef name:unselect arg:self arguments arg If Compare Call Assign" + }, + { + "library": "matplotlib", + "name": "_draw_idle", + "source_code": "def _draw_idle(self):\n with self._idle_draw_cntx():\n if not self._draw_pending:\n return\n self._draw_pending = False\n self.draw()", + "docstring": "Draw method for singleshot timer This draw method can be added to a singleshot timer, which can accumulate draws while the eventloop is spinning. This method will then only draw the first time and short-circuit the others.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_macosx.py", + "ast_data": "FunctionDef name:_draw_idle arg:self arguments arg With Call If Return return:no Assign Call" + }, + { + "library": "tensorflow", + "name": "_exclude_denylisted_ops", + "source_code": "def _exclude_denylisted_ops(self, node_names):\n return [node_name for node_name in node_names if self._debug_dump.node_op_type(debug_graphs.get_node_name(node_name)) not in self._GRAPH_STRUCT_OP_TYPE_DENYLIST]", + "docstring": "Exclude all nodes whose op types are in _GRAPH_STRUCT_OP_TYPE_DENYLIST. Args: node_names: An iterable of node or graph element names. 
Returns: A list of node names that are not denylisted.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py", + "ast_data": "FunctionDef name:_exclude_denylisted_ops arg:self arg:node_names arguments arg arg Return return:yes Compare Call Call" + }, + { + "library": "scrapy", + "name": "_find_method", + "source_code": "def _find_method(obj: Any, func: Callable[..., Any]) -> str:\n if obj and hasattr(func, '__func__'):\n members = inspect.getmembers(obj, predicate=inspect.ismethod)\n for name, obj_func in members:\n if obj_func.__func__ is func.__func__:\n return name\n raise ValueError(f'Function {func} is not an instance method in: {obj}')", + "docstring": "Helper function for Request.to_dict", + "type": "function", + "file_path": "scrapy\\scrapy\\http\\request\\__init__.py", + "ast_data": "FunctionDef name:_find_method arg:obj arg:func arguments arg arg If BoolOp Call Assign Call For If Compare Return return:yes Raise Call" + }, + { + "library": "sphinx", + "name": "DurationDomain", + "source_code": "class DurationDomain(Domain):\n name = 'duration'\n\n @property\n def reading_durations(self) -> dict[str, float]:\n return self.data.setdefault('reading_durations', {})\n\n def note_reading_duration(self, duration: float) -> None:\n self.reading_durations[self.env.docname] = duration\n\n def clear(self) -> None:\n self.reading_durations.clear()\n\n def clear_doc(self, docname: str) -> None:\n self.reading_durations.pop(docname, None)\n\n def merge_domaindata(self, docnames: Set[str], otherdata: _DurationDomainData) -> None:\n other_reading_durations = otherdata.get('reading_durations', {})\n docnames_set = frozenset(docnames)\n for docname, duration in other_reading_durations.items():\n if docname in docnames_set:\n self.reading_durations[docname] = duration", + "docstring": "A domain for durations of Sphinx processing.", + "type": "class", + "file_path": "sphinx\\sphinx\\ext\\duration.py", + "ast_data": "ClassDef name:DurationDomain Assign FunctionDef name:reading_durations arg:self arguments arg Return return:yes Call FunctionDef name:note_reading_duration arg:self arg:duration arguments arg arg Assign FunctionDef name:clear arg:self arguments arg Call FunctionDef name:clear_doc arg:self arg:docname arguments arg arg Call FunctionDef name:merge_domaindata arg:self arg:docnames arg:otherdata arguments arg arg arg Assign Call Assign Call For Call If Compare Assign" + }, + { + "library": "tensorflow", + "name": "dump_all_build_commands", + "source_code": "def dump_all_build_commands():\n for build in sorted(Build.all_builds().values(), key=lambda b: str(b.type_)):\n sys.stdout.write(f'# BEGIN {build.type_}\\n')\n for cmd in build.commands():\n sys.stdout.write(' '.join(cmd) + '\\n')\n sys.stdout.write(f'# END {build.type_}\\n')", + "docstring": "Used to generate what commands are run for each build.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\build_tools\\ci\\build.py", + "ast_data": "FunctionDef name:dump_all_build_commands arguments For Call Call Call arguments arg Call Call For Call Call Call Call" + }, + { + "library": "pytorch", + "name": "mark_unbacked", + "source_code": "@forbid_in_graph\ndef mark_unbacked(t, index, strict=False):\n assert not is_traceable_wrapper_subclass(t), 'not implemented yet'\n if isinstance(index, int):\n if strict:\n if not hasattr(t, '_dynamo_strict_unbacked_indices'):\n t._dynamo_strict_unbacked_indices = set()\n t._dynamo_strict_unbacked_indices.add(index)\n return\n if not hasattr(t, 
'_dynamo_unbacked_indices'):\n t._dynamo_unbacked_indices = set()\n t._dynamo_unbacked_indices.add(index)\n return\n assert isinstance(index, (list, tuple))\n for i in index:\n mark_unbacked(t, i)", + "docstring": "Mark a tensor as having an unbacked dim. This changes the semantics of operations, we will always report the size does not equal zero/one, we will turn asserts on this index into runtime asserts, and if you try to get the real value we will raise an exception. In other words, we will treat this dimension as if it was data dependent (we do not know anything about its value.) For historical reasons, by default if an unbacked dim is specialized, we will happily specialize it and continue. If you want to error in these cases, pass strict=True.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\decorators.py", + "ast_data": "FunctionDef name:mark_unbacked arg:t arg:index arg:strict arguments arg arg arg Call If Call If If Call Assign Call Call Return return:no If Call Assign Call Call Return return:no Call For Call" + }, + { + "library": "cherrypy", + "name": "json_in", + "source_code": "def json_in(content_type=[ntou('application/json'), ntou('text/javascript')], force=True, debug=False, processor=json_processor):\n request = cherrypy.serving.request\n if isinstance(content_type, text_or_bytes):\n content_type = [content_type]\n if force:\n if debug:\n cherrypy.log('Removing body processors %s' % repr(request.body.processors.keys()), 'TOOLS.JSON_IN')\n request.body.processors.clear()\n request.body.default_proc = cherrypy.HTTPError(415, 'Expected an entity of content type %s' % ', '.join(content_type))\n for ct in content_type:\n if debug:\n cherrypy.log('Adding body processor for %s' % ct, 'TOOLS.JSON_IN')\n request.body.processors[ct] = processor", + "docstring": "Add a processor to parse JSON request entities. The default processor places the parsed data into request.json. Incoming request entities which match the given content_type(s) will be deserialized from JSON to the Python equivalent, and the result stored at cherrypy.request.json. The 'content_type' argument may be a Content-Type string or a list of allowable Content-Type strings. If the 'force' argument is True (the default), then entities of other content types will not be allowed; \"415 Unsupported Media Type\" is raised instead. Supply your own processor to use a custom decoder, or to handle the parsed data differently. The processor can be configured via tools.json_in.processor or via the decorator method. Note that the deserializer requires the client send a Content-Length request header, or it will raise \"411 Length Required\". If for any other reason the request entity cannot be deserialized from JSON, it will raise \"400 Bad Request: Invalid JSON document\".", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\jsontools.py", + "ast_data": "FunctionDef name:json_in arg:content_type arg:force arg:debug arg:processor arguments arg arg arg arg Call Call Assign If Call Assign If If Call Call Call Call Assign Call Call For If Call Assign" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, num_packs=1):\n if num_packs < 0:\n raise ValueError('NCCL all-reduce requires num_packs >= 0, but {} is specified'.format(num_packs))\n super(NcclAllReduce, self).__init__(all_reduce_alg='nccl', num_packs=num_packs)", + "docstring": "Initializes the object. Args: num_packs: a non-negative integer. The number of packs to split values into. 
If zero, no packing will be done. Raises: ValueError: if is negative.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:num_packs arguments arg arg If Compare Raise Call Call Call Call" + }, + { + "library": "pandas", + "name": "_is_uniform_join_units", + "source_code": "def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool:\n first = join_units[0].block\n if first.dtype.kind == 'V':\n return False\n return all((type(ju.block) is type(first) for ju in join_units)) and all((ju.block.dtype == first.dtype or ju.block.dtype.kind in 'iub' for ju in join_units)) and all((not ju.is_na or ju.block.is_extension for ju in join_units))", + "docstring": "Check if the join units consist of blocks of uniform type that can be concatenated using Block.concat_same_type instead of the generic _concatenate_join_units (which uses ).", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\concat.py", + "ast_data": "FunctionDef name:_is_uniform_join_units arg:join_units arguments arg Assign If Compare Return return:yes Return return:yes BoolOp Call Compare Call Call Call BoolOp Compare Compare Call BoolOp" + }, + { + "library": "django", + "name": "queryset", + "source_code": "def queryset(self, request, queryset):\n raise NotImplementedError('subclasses of ListFilter must provide a queryset() method')", + "docstring": "Return the filtered queryset.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\filters.py", + "ast_data": "FunctionDef name:queryset arg:self arg:request arg:queryset arguments arg arg arg Raise Call" + }, + { + "library": "django", + "name": "D", + "source_code": "def D(self):\n return WEEKDAYS_ABBR[self.data.weekday()]", + "docstring": "Day of the week, textual, 3 letters; e.g. 'Fri'", + "type": "method", + "file_path": "django\\django\\utils\\dateformat.py", + "ast_data": "FunctionDef name:D arg:self arguments arg Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "index", + "source_code": "@cherrypy.expose\ndef index(self):\n users = ['Remi', 'Carlos', 'Hendrik', 'Lorenzo Lamas']\n yield self.header()\n yield '
<h2>List of users:</h2>'\n for user in users:\n yield ('%s<br />
' % user)\n yield self.footer()", + "docstring": "Stream HTTP response body of generator app index URI.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut08_generators_and_yield.py", + "ast_data": "FunctionDef name:index arg:self arguments arg Assign Call For Call" + }, + { + "library": "seaborn", + "name": "calculate_dendrogram", + "source_code": "def calculate_dendrogram(self):\n return hierarchy.dendrogram(self.linkage, no_plot=True, color_threshold=-np.inf)", + "docstring": "Calculates a dendrogram based on the linkage matrix Made a separate function, not a property because don't want to recalculate the dendrogram every time it is accessed. Returns ------- dendrogram : dict Dendrogram dictionary as returned by scipy.cluster.hierarchy .dendrogram. The important key-value pairing is \"reordered_ind\" which indicates the re-ordering of the matrix", + "type": "method", + "file_path": "seaborn\\seaborn\\matrix.py", + "ast_data": "FunctionDef name:calculate_dendrogram arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "infeed_dequeue", + "source_code": "def infeed_dequeue(dtype, shape, name=None):\n if dtype not in _SUPPORTED_INFEED_DTYPES:\n raise TypeError(\"Operation '{}' has type {} which is not a supported TPU infeed type. Supported types are: {}\".format(name, dtype, list(_SUPPORTED_INFEED_DTYPES)))\n return gen_tpu_ops.infeed_dequeue(dtype, shape, name=name)", + "docstring": "A placeholder op for a value that will be fed into the computation. Args: dtype: A . The type of elements in the tensor. shape: A or list of . The shape of the tensor. name: A name for the operation (optional). Returns: A of type . A tensor that will be provided using the infeed mechanism. Raises: TypeError: If 'dtype` is not a supported infeed type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\ops\\tpu_ops.py", + "ast_data": "FunctionDef name:infeed_dequeue arg:dtype arg:shape arg:name arguments arg arg arg If Compare Raise Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "first_call_function_nn_module_stack", + "source_code": "def first_call_function_nn_module_stack(graph: torch.fx.Graph) -> Optional[dict]:\n for node in graph.nodes:\n if node.op == 'call_function' and 'nn_module_stack' in node.meta:\n return node.meta['nn_module_stack']\n return None", + "docstring": "Returns the nn_module_stack of the first call_function node.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\_utils.py", + "ast_data": "FunctionDef name:first_call_function_nn_module_stack arg:graph arguments arg For If BoolOp Compare Compare Return return:yes Return return:no" + }, + { + "library": "django", + "name": "get_dated_items", + "source_code": "def get_dated_items(self):\n qs = self.get_dated_queryset()\n date_list = self.get_date_list(qs, ordering='DESC')\n if not date_list:\n qs = qs.none()\n return (date_list, qs, {})", + "docstring": "Return (date_list, items, extra_context) for this request.", + "type": "method", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "FunctionDef name:get_dated_items arg:self arguments arg Assign Call Assign Call If Assign Call Return return:yes" + }, + { + "library": "django", + "name": "prepare_database", + "source_code": "def prepare_database(self):\n pass", + "docstring": "Hook to do any database check or preparation, generally called before migrating a project or an app.", + "type": "method", + "file_path": 
"django\\django\\db\\backends\\base\\base.py", + "ast_data": "FunctionDef name:prepare_database arg:self arguments arg" + }, + { + "library": "scikit-learn", + "name": "decision_function", + "source_code": "@available_if(_estimator_has('decision_function'))\ndef decision_function(self, X):\n check_is_fitted(self)\n return self.estimator_.decision_function(self.transform(X))", + "docstring": "Compute the decision function of `classes_`. Regression and binary classification produce an array of shape [n_samples].", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py", + "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Call Return return:yes Call Call Call Call" + }, + { + "library": "pandas", + "name": "describe", + "source_code": "@final\ndef describe(self, percentiles=None, include=None, exclude=None) -> Self:\n return describe_ndframe(obj=self, include=include, exclude=exclude, percentiles=percentiles).__finalize__(self, method='describe')", + "docstring": "Generate descriptive statistics. Descriptive statistics include those that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding `includeexclude` description. >>> df.describe(exclude=[object]) # doctest: +SKIP categorical numeric count 3 3.0 unique 3 NaN top f NaN freq 1 NaN mean NaN 2.0 std NaN 1.0 min NaN 1.0 25% NaN 1.5 50% NaN 2.0 75% NaN 2.5 max NaN 3.0", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:describe arg:self arg:percentiles arg:include arg:exclude arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "class_name", + "source_code": "def class_name(self, cls: type[Any], parts: int=0, aliases: dict[str, str] | None=None) -> str:\n module = cls.__module__\n if module in {'__builtin__', 'builtins'}:\n fullname = cls.__name__\n else:\n fullname = f'{module}.{cls.__qualname__}'\n if parts == 0:\n result = fullname\n else:\n name_parts = fullname.split('.')\n result = '.'.join(name_parts[-parts:])\n if aliases is not None and result in aliases:\n return aliases[result]\n return result", + "docstring": "Given a class object, return a fully-qualified name. This works for things I've tested in matplotlib so far, but may not be completely general.", + "type": "method", + "file_path": "sphinx\\sphinx\\ext\\inheritance_diagram.py", + "ast_data": "FunctionDef name:class_name arg:self arg:cls arg:parts arg:aliases arguments arg arg arg arg Assign If Compare Assign Assign If Compare Assign Assign Call Assign Call If BoolOp Compare Compare Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "upper", + "source_code": "def upper(self):\n return asarray(upper(self))", + "docstring": "Return an array with the elements of converted to uppercase. 
See Also -------- char.upper", + "type": "method", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:upper arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "CodegenSymbol", + "source_code": "class CodegenSymbol(ABC):\n\n @abstractmethod\n def get_name(self) -> str:\n pass\n\n @abstractmethod\n def get_example(self) -> Union[torch.Tensor, sympy.Symbol]:\n pass", + "docstring": "An IR object possibly corresponding to a variable in the wrapper code.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py", + "ast_data": "ClassDef name:CodegenSymbol FunctionDef name:get_name arg:self arguments arg FunctionDef name:get_example arg:self arguments arg" + }, + { + "library": "scipy", + "name": "Ackley02", + "source_code": "class Ackley02(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-32.0] * self.N, [32.0] * self.N))\n self.global_optimum = [[0 for _ in range(self.N)]]\n self.fglob = -200.0\n\n def fun(self, x, *args):\n self.nfev += 1\n return -200 * exp(-0.02 * sqrt(x[0] ** 2 + x[1] ** 2))", + "docstring": "Ackley02 objective function. The Ackley02 [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Ackley02}(x) = -200 e^{-0.02 \\sqrt{x_1^2 + x_2^2}} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_A.py", + "ast_data": "ClassDef name:Ackley02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "broadcast_shapes", + "source_code": "def broadcast_shapes(*shapes):\n if not shapes:\n return ()\n shapes = [shp if isinstance(shp, tuple | list) else (shp,) for shp in shapes]\n big_shp = max(shapes, key=len)\n out = list(big_shp)\n for shp in shapes:\n if shp is big_shp:\n continue\n for i, x in enumerate(shp, start=-len(shp)):\n if x != 1 and x != out[i]:\n if out[i] != 1:\n raise ValueError('shapes cannot be broadcast to a single shape.')\n out[i] = x\n return (*out,)", + "docstring": "Check if shapes can be broadcast and return resulting shape This is similar to the NumPy `` function but does not check memory consequences of the resulting dense matrix. Parameters ---------- *shapes : tuple of shape tuples The tuple of shapes to be considered for broadcasting. Shapes should be tuples of non-negative integers. 
Returns ------- new_shape : tuple of integers The shape that results from broadcasting th input shapes.", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_sputils.py", + "ast_data": "FunctionDef name:broadcast_shapes arguments arg If Return return:no Assign Call Assign Call Assign Call For If Compare For Call Call If BoolOp Compare Compare If Compare Raise Call Assign Return return:yes" + }, + { + "library": "django", + "name": "_migrate_all_forwards", + "source_code": "def _migrate_all_forwards(self, state, plan, full_plan, fake, fake_initial):\n migrations_to_run = {m[0] for m in plan}\n for migration, _ in full_plan:\n if not migrations_to_run:\n break\n if migration in migrations_to_run:\n if 'apps' not in state.__dict__:\n if self.progress_callback:\n self.progress_callback('render_start')\n state.apps\n if self.progress_callback:\n self.progress_callback('render_success')\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\n migrations_to_run.remove(migration)\n return state", + "docstring": "Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\executor.py", + "ast_data": "FunctionDef name:_migrate_all_forwards arg:self arg:state arg:plan arg:full_plan arg:fake arg:fake_initial arguments arg arg arg arg arg arg Assign For If If Compare If Compare If Call If Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "to_ragged_spec", + "source_code": "def to_ragged_spec(spec):\n if not isinstance(spec, tensor.TensorSpec) or spec.shape.rank is None or spec.shape.is_fully_defined():\n return spec\n else:\n ragged_rank = max([axis for axis, size in enumerate(spec.shape.as_list()) if size is None])\n return ragged_tensor.RaggedTensorSpec(shape=spec.shape, dtype=spec.dtype, ragged_rank=ragged_rank, row_splits_dtype=row_splits_dtype)", + "docstring": "Returns the new spec based on RaggedTensors.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\ragged_batch_op.py", + "ast_data": "FunctionDef name:to_ragged_spec arg:spec arguments arg If BoolOp Call Compare Call Return return:yes Assign Call Call Call Compare Return return:yes Call" + }, + { + "library": "pytorch", + "name": "TorchSplit", + "source_code": "class TorchSplit(CallFunction):\n\n def __init__(self, arg, sizes, func=torch.split) -> None:\n super().__init__(func, arg, sizes, _users=MULTIPLE, dim=KeywordArg('dim'))\n\n def _match(self, node: torch.fx.Node, ctx: MatchContext):\n m = super()._match(node, ctx)\n if not m:\n return m\n split_sections = node.args[1]\n if not isinstance(split_sections, (list, tuple)):\n return FailedMatch('split not normalized')\n seen_idxs = OrderedSet[int]()\n for user in node.users:\n if not CallFunction(operator.getitem, Arg(), Arg()).match(user):\n return FailedMatch(f'user of split not a getitem: {user}')\n if not isinstance(user.args[1], int):\n return FailedMatch('only integer getitems are handled')\n if user.args[1] in seen_idxs:\n return FailedMatch(f'duplicate getitem {user.args[1]}')\n if user.args[-1] < 0:\n return FailedMatch('negative index')\n seen_idxs.add(user.args[1])\n return m", + "docstring": "Matches a call to torch.split if it is in a normalized form. 
Ensures that all users of splits are unique getitems.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\split_cat.py", + "ast_data": "ClassDef name:TorchSplit FunctionDef name:__init__ arg:self arg:arg arg:sizes arg:func arguments arg arg arg arg Call Call Call FunctionDef name:_match arg:self arg:node arg:ctx arguments arg arg arg Assign Call Call If Return return:yes Assign If Call Return return:yes Call Assign Call For If Call Call Call Call Return return:yes Call If Call Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Call Return return:yes" + }, + { + "library": "django", + "name": "get_valid_name", + "source_code": "def get_valid_name(self, name):\n return get_valid_filename(name)", + "docstring": "Return a filename, based on the provided filename, that's suitable for use in the target storage system.", + "type": "method", + "file_path": "django\\django\\core\\files\\storage\\base.py", + "ast_data": "FunctionDef name:get_valid_name arg:self arg:name arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "maybe_reorder_for_minimizing_partition", + "source_code": "def maybe_reorder_for_minimizing_partition(self, nodes: list[BaseSchedulerNode]) -> list[BaseSchedulerNode]:\n from .memory import estimate_peak_memory, prepare_planning_info\n graph_outputs = OrderedSet(V.graph.get_output_names())\n default_peak_memory, name_to_freeable_input_buf = prepare_planning_info(nodes, self.name_to_buf, self.name_to_fused_node, OrderedSet(V.graph.graph_inputs.keys()), graph_outputs)\n reordered_nodes = self.reorder_for_minimizing_partition(nodes)\n reorder_peak_memory, _ = estimate_peak_memory(reordered_nodes, name_to_freeable_input_buf, graph_outputs)\n if reorder_peak_memory < default_peak_memory * 1.1:\n return reordered_nodes\n return nodes", + "docstring": "Reorder nodes to minimize the number of partitions if this only slightly increase peak memory.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:maybe_reorder_for_minimizing_partition arg:self arg:nodes arguments arg arg Assign Call Call Assign Call Call Call Assign Call Assign Call If Compare Return return:yes Return return:yes" + }, + { + "library": "kornia", + "name": "warp_grid3d", + "source_code": "def warp_grid3d(grid: Tensor, src_homo_dst: Tensor) -> Tensor:\n batch_size: int = src_homo_dst.size(0)\n _, depth, height, width, _ = grid.size()\n grid = grid.expand(batch_size, -1, -1, -1, -1)\n if len(src_homo_dst.shape) == 3:\n src_homo_dst = src_homo_dst.view(batch_size, 1, 4, 4)\n flow: Tensor = transform_points(src_homo_dst, grid.to(src_homo_dst))\n return flow.view(batch_size, depth, height, width, 3)", + "docstring": "Compute the grid to warp the coordinates grid by the homography/ies. Args: grid: Unwrapped grid of the shape :math:. src_homo_dst: Homography or homographies (stacked) to transform all points in the grid. Shape of the homography has to be :math: or :math:. 
Returns: the transformed grid of shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py", + "ast_data": "FunctionDef name:warp_grid3d arg:grid arg:src_homo_dst arguments arg arg Call Assign Call Assign Call If Compare Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_read_bytes", + "source_code": "def _read_bytes(f, n):\n return f.read(n)", + "docstring": "Read the next bytes", + "type": "function", + "file_path": "scipy\\scipy\\io\\_idl.py", + "ast_data": "FunctionDef name:_read_bytes arg:f arg:n arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "_encode", + "source_code": "def _encode(self, messages, encode_empty=False):\n serialized_messages = MessagePartSerializer().dumps(messages)\n return self._encode_parts(serialized_messages, encode_empty=encode_empty)", + "docstring": "Return an encoded version of the messages list which can be stored as plain text. Proxies MessagePartSerializer.dumps and _encoded_parts.", + "type": "method", + "file_path": "django\\django\\contrib\\messages\\storage\\cookie.py", + "ast_data": "FunctionDef name:_encode arg:self arg:messages arg:encode_empty arguments arg arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "get_schema", + "source_code": "def get_schema(frame, name: str, keys=None, con=None, dtype: DtypeArg | None=None, schema: str | None=None) -> str:\n with pandasSQL_builder(con=con) as pandas_sql:\n return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype, schema=schema)", + "docstring": "Get the SQL db table schema for the given frame. Parameters ---------- frame : DataFrame name : str name of SQL table keys : string or sequence, default: None columns to use a primary key con: ADBC Connection, SQLAlchemy connectable, sqlite3 connection, default: None ADBC provides high performance I/O with native type support, where available. Using SQLAlchemy makes it possible to use any DB supported by that library If a DBAPI2 object, only sqlite3 is supported. dtype : dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type, or a string for sqlite3 fallback connection. 
schema: str, default: None Optional specifying the schema to be used in creating the table.", + "type": "function", + "file_path": "pandas\\pandas\\io\\sql.py", + "ast_data": "FunctionDef name:get_schema arg:frame arg:name arg:keys arg:con arg:dtype arg:schema arguments arg arg arg arg arg arg With Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_fuse_fx", + "source_code": "def _fuse_fx(model: GraphModule, is_qat: bool, fuse_custom_config: Union[FuseCustomConfig, dict[str, Any], None]=None, backend_config: Union[BackendConfig, dict[str, Any], None]=None) -> GraphModule:\n _check_is_graph_module(model)\n return fuse(model, is_qat, fuse_custom_config, backend_config)", + "docstring": "Internal helper function to fuse modules in preparation for quantization Args: model: GraphModule object from symbolic tracing (torch.fx.symbolic_trace)", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantize_fx.py", + "ast_data": "FunctionDef name:_fuse_fx arg:model arg:is_qat arg:fuse_custom_config arg:backend_config arguments arg arg arg arg Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "constant_to_optimal_zero", + "source_code": "def constant_to_optimal_zero(self, y_true, sample_weight=None):\n return np.zeros_like(y_true)", + "docstring": "Calculate term dropped in loss. With this term added, the loss of perfect predictions is zero.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", + "ast_data": "FunctionDef name:constant_to_optimal_zero arg:self arg:y_true arg:sample_weight arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "embedding_feature", + "source_code": "@property\ndef embedding_feature(self):\n return HardwareFeature._embedding_feature_proto_to_string(self.tpu_hardware_feature_proto.embedding_feature)", + "docstring": "TPU embedding feature. 
Returns: An EmbeddingFeature enum.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_hardware_feature.py", + "ast_data": "FunctionDef name:embedding_feature arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_TensorSpecCodec", + "source_code": "class _TensorSpecCodec:\n\n def can_encode(self, pyobj):\n return isinstance(pyobj, TensorSpec) and (not isinstance(pyobj, BoundedTensorSpec))\n\n def do_encode(self, tensor_spec_value, encode_fn):\n encoded_tensor_spec = struct_pb2.StructuredValue()\n encoded_tensor_spec.tensor_spec_value.CopyFrom(struct_pb2.TensorSpecProto(shape=encode_fn(tensor_spec_value.shape).tensor_shape_value, dtype=encode_fn(tensor_spec_value.dtype).tensor_dtype_value, name=tensor_spec_value.name))\n return encoded_tensor_spec\n\n def can_decode(self, value):\n return value.HasField('tensor_spec_value')\n\n def do_decode(self, value, decode_fn):\n name = value.tensor_spec_value.name\n return TensorSpec(shape=decode_fn(struct_pb2.StructuredValue(tensor_shape_value=value.tensor_spec_value.shape)), dtype=decode_fn(struct_pb2.StructuredValue(tensor_dtype_value=value.tensor_spec_value.dtype)), name=name if name else None)", + "docstring": "Codec for .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", + "ast_data": "ClassDef name:_TensorSpecCodec FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes BoolOp Call Call FunctionDef name:do_encode arg:self arg:tensor_spec_value arg:encode_fn arguments arg arg arg Assign Call Call Call Call Call Return return:yes FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Assign Return return:yes Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "eager_run", + "source_code": "def eager_run(main=None, argv=None) -> NoReturn:\n enable_eager_execution()\n app.run(main, argv)", + "docstring": "Runs the program with an optional main function and argv list. The program will run with eager execution enabled. Example: Args: main: the main function to run. argv: the arguments to pass to it.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:eager_run arg:main arg:argv arguments arg arg Call Call" + }, + { + "library": "pygame", + "name": "map_array", + "source_code": "def map_array(surface, array):\n if array.ndim == 0:\n raise ValueError('array must have at least 1 dimension')\n shape = array.shape\n if shape[-1] != 3:\n raise ValueError('array must be a 3d array of 3-value color data')\n target = numpy_empty(shape[:-1], numpy.int32)\n pix_map_array(target, array, surface)\n return target", + "docstring": "pygame.surfarray.map_array(Surface, array3d): return array2d map a 3d array into a 2d array Convert a 3D array into a 2D array. This will use the given Surface format to control the conversion. Note: arrays do not need to be 3D, as long as the minor axis has three elements giving the component colours, any array shape can be used (for example, a single colour can be mapped, or an array of colours). 
The array shape is limited to eleven dimensions maximum, including the three element minor axis.", + "type": "function", + "file_path": "pygame\\src_py\\surfarray.py", + "ast_data": "FunctionDef name:map_array arg:surface arg:array arguments arg arg If Compare Raise Call Assign If Compare Raise Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "build_graph", + "source_code": "def build_graph(device, input_shape, perm, datatype, num_iters):\n with ops.device('/%s:0' % device):\n total_size = np.prod(input_shape)\n inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape)\n t = constant_op.constant(inp, shape=input_shape)\n outputs = []\n transpose_op = array_ops.transpose(t, perm)\n outputs.append(transpose_op)\n for _ in range(1, num_iters):\n with ops.control_dependencies([transpose_op]):\n transpose_op = array_ops.transpose(t, perm)\n outputs.append(transpose_op)\n return control_flow_ops.group(*outputs)", + "docstring": "builds a graph containing a sequence of conv2d operations. Args: device: String, the device to run on. input_shape: Shape of the input tensor. perm: A list of ints with the same length as input tensor's dimension. datatype: numpy data type of the input tensor. num_iters: number of iterations to run transpose. Returns: An array of tensors to run()", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\transpose_benchmark.py", + "ast_data": "FunctionDef name:build_graph arg:device arg:input_shape arg:perm arg:datatype arg:num_iters arguments arg arg arg arg arg With Call Assign Call Assign Call Call Assign Call Assign Assign Call Call For Call With Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_verify_tf_cond_branch_vars", + "source_code": "def _verify_tf_cond_branch_vars(vars_, symbol_names, branch_name):\n for name, var_ in zip(symbol_names, vars_):\n if isinstance(var_, variables.Undefined):\n raise ValueError(\"'{}' must also be initialized in the {} branch\".format(name, branch_name))\n if isinstance(var_, variables.UndefinedReturnValue):\n raise ValueError('the {} branch must also have a return statement.'.format(branch_name))", + "docstring": "Verifies variables output by a conditional branch for consistency.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py", + "ast_data": "FunctionDef name:_verify_tf_cond_branch_vars arg:vars_ arg:symbol_names arg:branch_name arguments arg arg arg For Call If Call Raise Call Call If Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "inner_custom_getter", + "source_code": "def inner_custom_getter(getter, *args, **kwargs):\n cast_to_bfloat16 = False\n requested_dtype = kwargs['dtype']\n if requested_dtype == dtypes.bfloat16:\n kwargs['dtype'] = dtypes.float32\n cast_to_bfloat16 = True\n var = getter(*args, **kwargs)\n if cast_to_bfloat16:\n var = math_ops.cast(var, dtypes.bfloat16)\n return var", + "docstring": "Custom getter that forces variables to have type self.variable_type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\bfloat16.py", + "ast_data": "FunctionDef name:inner_custom_getter arg:getter arguments arg arg arg Assign Assign If Compare Assign Assign Assign Call If Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_make_initializable_iterator", + "source_code": "def _make_initializable_iterator(self, shared_name=None):\n del shared_name\n if 
context.executing_eagerly():\n raise ValueError('Cannot create initializable iterator in Eager mode. Please use `iter()` instead.')\n return self._get_iterator()", + "docstring": "Get an initializable iterator for DistributedDatasetsFromFunctionV1.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py", + "ast_data": "FunctionDef name:_make_initializable_iterator arg:self arg:shared_name arguments arg arg If Call Raise Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "load", + "source_code": "def load(self, stream: _ReadableStream[str | bytes], format: Any) -> None:\n if isinstance(format, str):\n format = self.formats[format]\n frozen = format.load(stream)\n if not isinstance(frozen, dict) or frozen.get('envversion') != self._env_version:\n msg = 'old format'\n raise ValueError(msg)\n index2fn = frozen['docnames']\n self._filenames = dict(zip(index2fn, frozen['filenames'], strict=True))\n self._titles = dict(zip(index2fn, frozen['titles'], strict=True))\n self._all_titles = {}\n for docname in self._titles:\n self._all_titles[docname] = []\n for title, doc_tuples in frozen['alltitles'].items():\n for doc, titleid in doc_tuples:\n self._all_titles[index2fn[doc]].append((title, titleid))\n\n def load_terms(mapping: dict[str, Any]) -> dict[str, set[str]]:\n rv = {}\n for k, v in mapping.items():\n if isinstance(v, int):\n rv[k] = {index2fn[v]}\n else:\n rv[k] = {index2fn[i] for i in v}\n return rv\n self._mapping = load_terms(frozen['terms'])\n self._title_mapping = load_terms(frozen['titleterms'])", + "docstring": "Reconstruct from frozen data.", + "type": "method", + "file_path": "sphinx\\sphinx\\search\\__init__.py", + "ast_data": "FunctionDef name:load arg:self arg:stream arg:format arguments arg arg arg If Call Assign Assign Call If BoolOp Call Compare Call Assign Raise Call Assign Assign Call Call Assign Call Call Assign For Assign For Call For Call FunctionDef name:load_terms arg:mapping arguments arg Assign For Call If Call Assign Assign Return return:yes Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "initializer", + "source_code": "@property\ndef initializer(self):\n if self._initializer is not None:\n return self._initializer\n else:\n raise ValueError('The iterator does not have an initializer. This means it was likely created using `tf.data.Dataset.make_one_shot_iterator()`. For an initializable iterator, use `tf.data.Dataset.make_initializable_iterator()` instead.')", + "docstring": "A that should be run to initialize this iterator. Returns: A that should be run to initialize this iterator Raises: ValueError: If this iterator initializes itself automatically.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py", + "ast_data": "FunctionDef name:initializer arg:self arguments arg If Compare Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "should_stop", + "source_code": "def should_stop(self):\n if self._check_stop():\n return True\n if self._sess:\n return self._wrapped_is_stoppable and self._sess.should_stop()\n return True", + "docstring": "Return true if this session should not be used anymore. Always return True if the session was closed. 
Returns: True if the session should stop, False otherwise.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", + "ast_data": "FunctionDef name:should_stop arg:self arguments arg If Call Return return:yes If Return return:yes BoolOp Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n X = validate_data(self, X, accept_sparse='csc')\n random_state = check_random_state(self.random_state)\n n_features = X.shape[1]\n if self.coef0 != 0:\n n_features += 1\n self.indexHash_ = random_state.randint(0, high=self.n_components, size=(self.degree, n_features))\n self.bitHash_ = random_state.choice(a=[-1, 1], size=(self.degree, n_features))\n self._n_features_out = self.n_components\n return self", + "docstring": "Fit the model with X. Initializes the internal variables. The method needs no information about the distribution of data, so we only care about n_features in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\kernel_approximation.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Call Assign If Compare Assign Call Assign Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "History", + "source_code": "class History(Callback):\n\n def __init__(self):\n super(History, self).__init__()\n self.history = {}\n\n def on_train_begin(self, logs=None):\n self.epoch = []\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n self.epoch.append(epoch)\n for k, v in logs.items():\n self.history.setdefault(k, []).append(v)\n self.model.history = self", + "docstring": "Callback that records events into a object. This callback is automatically applied to every Keras model. The object gets returned by the method of models. Example: >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) >>> model.compile(tf.keras.optimizers.SGD(), loss='mse') >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), ... 
epochs=10, verbose=1) >>> print(history.params) {'verbose': 1, 'epochs': 10, 'steps': 1} >>> # check the keys of history object >>> print(history.history.keys()) dict_keys(['loss'])", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "ClassDef name:History FunctionDef name:__init__ arg:self arguments arg Call Call Assign FunctionDef name:on_train_begin arg:self arg:logs arguments arg arg Assign FunctionDef name:on_epoch_end arg:self arg:epoch arg:logs arguments arg arg arg Assign BoolOp Call For Call Call Call Assign" + }, + { + "library": "django", + "name": "StaticFilesHandler", + "source_code": "class StaticFilesHandler(StaticFilesHandlerMixin, WSGIHandler):\n\n def __init__(self, application):\n self.application = application\n self.base_url = urlparse(self.get_base_url())\n super().__init__()\n\n def __call__(self, environ, start_response):\n if not self._should_handle(get_path_info(environ)):\n return self.application(environ, start_response)\n return super().__call__(environ, start_response)", + "docstring": "WSGI middleware that intercepts calls to the static files directory, as defined by the STATIC_URL setting, and serves those files.", + "type": "class", + "file_path": "django\\django\\contrib\\staticfiles\\handlers.py", + "ast_data": "ClassDef name:StaticFilesHandler FunctionDef name:__init__ arg:self arg:application arguments arg arg Assign Assign Call Call Call Call FunctionDef name:__call__ arg:self arg:environ arg:start_response arguments arg arg arg If Call Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "sym_not", + "source_code": "def sym_not(a):\n import sympy\n if overrides.has_torch_function_unary(a):\n return overrides.handle_torch_function(sym_not, (a,), a)\n if hasattr(a, '__sym_not__'):\n return a.__sym_not__()\n if isinstance(a, sympy.Basic):\n return ~a\n return not a", + "docstring": "SymInt-aware utility for logical negation. Args: a (SymBool or bool): Object to negate", + "type": "function", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:sym_not arg:a arguments arg If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "time_and_count", + "source_code": "def time_and_count(fn: Callable[Concatenate[Any, P], T]) -> Callable[Concatenate[Any, P], T]:\n\n @wraps(fn)\n def wrapper(self: Any, *args: P.args, **kwargs: P.kwargs) -> T:\n fn_qual_name = f'{self.__class__.__name__}.{fn.__name__}'\n counters['inductor'][f'benchmarking.{fn_qual_name}'] += 1\n with dynamo_timed(fn_qual_name, log_pt2_compile_event=False):\n return fn(self, *args, **kwargs)\n return wrapper", + "docstring": "Wraps with context, and increments the appropriate dynamo counters. 
It is expected that is a method of or one of its subclasses; typing limitations prevent us from declaring this directly.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\runtime\\benchmarking.py", + "ast_data": "FunctionDef name:time_and_count arg:fn arguments arg FunctionDef name:wrapper arg:self arguments arg arg arg Assign With Call Return return:yes Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_create_default_local_metadata", + "source_code": "def _create_default_local_metadata(state_dict: STATE_DICT_TYPE) -> Metadata:\n plan = _create_default_metadata_only_plan(state_dict)\n _, md = create_default_global_save_plan([plan])\n return md", + "docstring": "Return the ``.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\default_planner.py", + "ast_data": "FunctionDef name:_create_default_local_metadata arg:state_dict arguments arg Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "num_replicas_in_sync", + "source_code": "@property\ndef num_replicas_in_sync(self):\n return self._num_replicas_in_sync", + "docstring": "Returns the number of compute replicas in sync.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:num_replicas_in_sync arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, delta=1.0, reduction=losses_utils.ReductionV2.AUTO, name='huber_loss'):\n super().__init__(huber, name=name, reduction=reduction, delta=delta)", + "docstring": "Initializes instance. Args: delta: A float, the point where the Huber loss function changes from a quadratic to linear. reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. Defaults to 'huber_loss'.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:delta arg:reduction arg:name arguments arg arg arg arg Call Call" + }, + { + "library": "matplotlib", + "name": "new_saved_frame_seq", + "source_code": "def new_saved_frame_seq(self):\n return self.new_frame_seq()", + "docstring": "Return a new sequence of saved/cached frame information.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\animation.py", + "ast_data": "FunctionDef name:new_saved_frame_seq arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_min_max_value", + "source_code": "def get_min_max_value(self) -> tuple[float, float]:\n total_freq = sum(self._hist_freq)\n hist_freq_cumsum = np.cumsum(self._hist_freq) / total_freq\n min_quantile, max_quantile = (self._calib_opts.calibration_parameters.min_percentile / 100.0, self._calib_opts.calibration_parameters.max_percentile / 100.0)\n min_quantile_idx, max_quantile_idx = (np.searchsorted(hist_freq_cumsum, min_quantile, side='right'), np.searchsorted(hist_freq_cumsum, max_quantile, side='left'))\n min_value, max_value = (self._hist_mids[min_quantile_idx], self._hist_mids[max_quantile_idx])\n return (min_value, max_value)", + "docstring": "Calculates min and max from statistics using calibration options. 
A \"percentile\" is a statistical concept that represents the value below which a given percentage of data falls in a dataset. It involves sorting the data from smallest to largest and then finding the value at a specified percentage position. For example, the 0.01 percentile represents the value in a given data set that corresponds to the lowest 0.01% of the data. HistogramPercentile calibration uses min_percentile and max_percentile to find min and max. min_percentile and max_percentile must be in range [0, 100]. min_percentile is 0.001 by default. max_percentile is 99.999 by default. Returns: (min_value, max_value): Min and max calculated using HistogramPercentile", + "type": "method", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py", + "ast_data": "FunctionDef name:get_min_max_value arg:self arguments arg Assign Call Assign Call Assign Assign Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, dropout=0.0):\n super().__init__()\n self.dropout = dropout", + "docstring": "Processes a projected query and key-value pair to apply scaled dot product attention. Args: dropout (float): probability of dropping an attention weight. Examples:: >>> SDP = torchtext.models.ScaledDotProduct(0.1) >>> q = torch.randn(256, 21, 3) >>> k = v = torch.randn(256, 21, 3) >>> attn_output, attn_weights = SDP(q, k, v) >>> print(attn_output.shape, attn_weights.shape) torch.Size([256, 21, 3]) torch.Size([256, 21, 21])", + "type": "method", + "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:dropout arguments arg arg Call Call Assign" + }, + { + "library": "pytorch", + "name": "shard", + "source_code": "@abc.abstractmethod\ndef shard(self, module: nn.Module) -> nn.Module:\n pass", + "docstring": "Shard a module base on the implementation of this method, and return the sharded version of the module. Args: module (:class:): The module to apply sharding to. 
Returns: A :class: object that represents a module that's already been sharded.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharder.py", + "ast_data": "FunctionDef name:shard arg:self arg:module arguments arg arg" + }, + { + "library": "tensorflow", + "name": "_f", + "source_code": "def _f(file_index):\n\n def _print_cache():\n replica_str = '%d' % file_index\n if self._parameters.trace_dir:\n output_path = os.path.join(self._parameters.trace_dir, _COMPACT_TRACE_FILE_PREFIX) + replica_str + self._get_outfile_suffix()\n output_stream = _OUTPUT_STREAM_ESCAPE + output_path\n else:\n output_stream = sys.stderr\n new_step_line = _REPLICA_ID_TAG + replica_str\n print_ops = []\n if self._parameters.inspect_trace:\n if self._num_signature_dimensions() > 1:\n raise ValueError('Inspecting multi signatures are not supported.')\n if self._parameters.trace_mode in tensor_tracer_flags.TRACE_MODE_HISTORY:\n print_ops.append(self._inspect_history_cache(cache=cache, replica_id=replica_id, step_num=step_num, tensor_trace_order=tensor_trace_order))\n else:\n print_ops.append(self._inspect_summary_cache(cache=cache, replica_id=replica_id, step_num=step_num, output_stream=output_stream, tensor_trace_order=tensor_trace_order))\n else:\n for i in range(self._num_signature_dimensions()):\n print_ops.append(logging_ops.print_v2(new_step_line, '\\n', cache[:, i], '\\n', summarize=-1, output_stream=output_stream))\n with ops.control_dependencies(print_ops):\n return constant_op.constant(0).op\n return _print_cache", + "docstring": "Generates a func that flushes the cache to a file.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:_f arg:file_index arguments arg FunctionDef name:_print_cache arguments Assign If Assign Call Call Assign Assign Assign Assign If If Compare Call Raise Call If Compare Call Call Call Call For Call Call Call Call With Call Return return:yes Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "__call__", + "source_code": "def __call__(self, t):\n t = np.asarray(t)\n return np.power.outer(1 - t, self._orders[::-1]) * np.power.outer(t, self._orders) @ self._px", + "docstring": "Evaluate the Bézier curve at point(s) *t* in [0, 1]. Parameters ---------- t : (k,) array-like Points at which to evaluate the curve. Returns ------- (k, d) array Value of the curve for each point in *t*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\bezier.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:t arguments arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "english_upper", + "source_code": "def english_upper(s):\n uppered = s.translate(UPPER_TABLE)\n return uppered", + "docstring": "Apply English case rules to convert ASCII strings to all upper case. This is an internal utility function to replace calls to str.upper() such that we can avoid changing behavior with changing locales. In particular, Turkish has distinct dotted and dotless variants of the Latin letter \"I\" in both lowercase and uppercase. Thus, \"i\".upper() != \"I\" in a \"tr\" locale. 
Parameters ---------- s : str Returns ------- uppered : str Examples -------- >>> from numpy._core.numerictypes import english_upper >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' >>> english_upper('') ''", + "type": "function", + "file_path": "numpy\\numpy\\_core\\_string_helpers.py", + "ast_data": "FunctionDef name:english_upper arg:s arguments arg Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "maxes_of_dots", + "source_code": "def maxes_of_dots(self, arrays):\n feature_scores = [0] * len(arrays)\n for i, sd in enumerate(arrays):\n for j, sd2 in enumerate(arrays[i + 1:]):\n corr_temp = np.dot(sd.T, sd2)\n feature_scores[i] += np.max(corr_temp, axis=1)\n feature_scores[j + i + 1] += np.max(corr_temp, axis=0)\n return feature_scores", + "docstring": "A magical feature score for each feature in each dataset :ref:. If arrays are column-wise zscore-d before computation it results in characterizing each column in each array with sum of maximal correlations of that column with columns in other arrays. Arrays must agree only on the first dimension. Numpy uses this as a simultaneous benchmark of 1) dot products and 2) max(, axis=).", + "type": "method", + "file_path": "numpy\\benchmarks\\benchmarks\\bench_app.py", + "ast_data": "FunctionDef name:maxes_of_dots arg:self arg:arrays arguments arg arg Assign Call For Call For Call Assign Call Call Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "__contains__", + "source_code": "def __contains__(self, key):\n if not self.loaded:\n self.load()\n return key in self._data", + "docstring": "Check if the session has an object by key.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:__contains__ arg:self arg:key arguments arg arg If Call Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "decorated", + "source_code": "def decorated(*args, **kwds):\n _, grad = val_and_grad_function(f, params=params)(*args, **kwds)\n return grad", + "docstring": "Computes the gradient of the decorated function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py", + "ast_data": "FunctionDef name:decorated arguments arg arg Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "shallow_copy_with_tensor_meta", + "source_code": "def shallow_copy_with_tensor_meta(self, tensor_meta: Optional[TensorMeta]) -> 'DTensorSpec':\n assert tensor_meta is not None, 'shallow copy with no tensor_meta!'\n return DTensorSpec(self.mesh, self.placements, tensor_meta=tensor_meta)", + "docstring": "Shallow copy the DTensorSpec with a new tensor_meta.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_dtensor_spec.py", + "ast_data": "FunctionDef name:shallow_copy_with_tensor_meta arg:self arg:tensor_meta arguments arg arg Compare Return return:yes Call" + }, + { + "library": "scipy", + "name": "_get_nom_val", + "source_code": "@staticmethod\ndef _get_nom_val(atrv):\n m = r_nominal.match(atrv)\n if m:\n attrs, _ = split_data_line(m.group(1))\n return tuple(attrs)\n else:\n raise ValueError('This does not look like a nominal string')", + "docstring": "Given a string containing a nominal type, returns a tuple of the possible values. A nominal type is defined as something framed between braces ({}). 
Parameters ---------- atrv : str Nominal type definition Returns ------- poss_vals : tuple possible values Examples -------- >>> from scipy.io.arff._arffread import NominalAttribute >>> NominalAttribute._get_nom_val(\"{floup, bouga, fl, ratata}\") ('floup', 'bouga', 'fl', 'ratata')", + "type": "method", + "file_path": "scipy\\scipy\\io\\arff\\_arffread.py", + "ast_data": "FunctionDef name:_get_nom_val arg:atrv arguments arg Assign Call If Assign Call Call Return return:yes Call Raise Call" + }, + { + "library": "tensorflow", + "name": "_fetch_preprocessing_callback", + "source_code": "def _fetch_preprocessing_callback(fetch):\n if isinstance(fetch, ops.Operation):\n operation_fetches.append(fetch)\n return fetch\n elif isinstance(fetch, meta_graph_pb2.TensorInfo):\n tensor_infos.append(fetch)\n decoded = _get_element_from_tensor_info(fetch, self._func_graph)\n if tensor_util.is_tf_type(decoded) or isinstance(decoded, composite_tensor.CompositeTensor):\n tensor_fetches.append(decoded)\n else:\n operation_fetches.append(decoded)\n return decoded\n elif isinstance(fetch, (tensor_lib.Tensor, composite_tensor.CompositeTensor)):\n tensor_fetches.append(fetch)\n return fetch\n else:\n graph_element = self.graph.as_graph_element(fetch)\n return _fetch_preprocessing_callback(graph_element)", + "docstring": "Extract out lists of ops, tensors, and tensor type info. Turns TensorInfos into Tensors in the original structure. Also extracts ops from . Args: fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or string identifying a Tensor or Operation. Returns: converted to a Tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\wrap_function.py", + "ast_data": "FunctionDef name:_fetch_preprocessing_callback arg:fetch arguments arg If Call Call Return return:yes If Call Call Assign Call If BoolOp Call Call Call Call Return return:yes If Call Call Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "build_attrs", + "source_code": "def build_attrs(self, base_attrs, extra_attrs=None):\n attrs = super().build_attrs(base_attrs, extra_attrs=extra_attrs)\n attrs.setdefault('class', '')\n attrs.update({'data-ajax--cache': 'true', 'data-ajax--delay': 250, 'data-ajax--type': 'GET', 'data-ajax--url': self.get_url(), 'data-app-label': self.field.model._meta.app_label, 'data-model-name': self.field.model._meta.model_name, 'data-field-name': self.field.name, 'data-theme': 'admin-autocomplete', 'data-allow-clear': json.dumps(not self.is_required), 'data-placeholder': '', 'lang': self.i18n_name, 'class': attrs['class'] + (' ' if attrs['class'] else '') + 'admin-autocomplete'})\n return attrs", + "docstring": "Set select2's AJAX attributes. Attributes can be set using the html5 data attribute. 
Nested attributes require a double dash as per", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\widgets.py", + "ast_data": "FunctionDef name:build_attrs arg:self arg:base_attrs arg:extra_attrs arguments arg arg arg Assign Call Call Call Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "n_features_in_", + "source_code": "@property\ndef n_features_in_(self):\n return self.dictionary.shape[1]", + "docstring": "Number of features seen during .", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py", + "ast_data": "FunctionDef name:n_features_in_ arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_DefaultDistributionStrategyV1", + "source_code": "class _DefaultDistributionStrategyV1(StrategyV1):\n\n def __init__(self):\n if not _creating_default_strategy_singleton:\n raise RuntimeError('Should only create a single instance of _DefaultDistributionStrategy')\n super(_DefaultDistributionStrategyV1, self).__init__(_DefaultDistributionExtended(self))\n\n def __deepcopy__(self, memo):\n del memo\n raise RuntimeError('Should only create a single instance of _DefaultDistributionStrategy')", + "docstring": "Default if none is explicitly selected.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "ClassDef name:_DefaultDistributionStrategyV1 FunctionDef name:__init__ arg:self arguments arg If Raise Call Call Call Call FunctionDef name:__deepcopy__ arg:self arg:memo arguments arg arg Raise Call" + }, + { + "library": "numpy", + "name": "_rsplit", + "source_code": "@array_function_dispatch(_split_dispatcher)\ndef _rsplit(a, sep=None, maxsplit=None):\n return _vec_string(a, np.object_, 'rsplit', [sep] + _clean_args(maxsplit))", + "docstring": "For each element in , return a list of the words in the string, using as the delimiter string. Calls :meth: element-wise. Except for splitting from the right, behaves like . Parameters ---------- a : array-like, with `sepmaxsplitmaxsplit` splits are done, the rightmost ones. Returns ------- out : ndarray Array of list objects See Also -------- str.rsplit, split Examples -------- >>> import numpy as np >>> a = np.array(['aAaAaA', 'abBABba']) >>> np.strings.rsplit(a, 'A') # doctest: +SKIP array([list(['a', 'a', 'a', '']), # doctest: +SKIP list(['abB', 'Bba'])], dtype=object) # doctest: +SKIP", + "type": "function", + "file_path": "numpy\\numpy\\_core\\strings.py", + "ast_data": "FunctionDef name:_rsplit arg:a arg:sep arg:maxsplit arguments arg arg arg Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "__rpow__", + "source_code": "def __rpow__(self, other):\n return power(other, self)", + "docstring": "Raise other to the power self, masking the potential NaNs/Infs", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__rpow__ arg:self arg:other arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_default_signal", + "source_code": "def _get_default_signal() -> signal.Signals:\n if IS_WINDOWS:\n return signal.CTRL_C_EVENT\n else:\n return signal.SIGTERM", + "docstring": "Get the default termination signal. 
SIGTERM for unix, CTRL_C_EVENT for windows.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\subprocess_handler\\subprocess_handler.py", + "ast_data": "FunctionDef name:_get_default_signal arguments If Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "prescreen_choices", + "source_code": "@staticmethod\ndef prescreen_choices(choices: list[ChoiceCaller]) -> list[ChoiceCaller]:\n from .codegen.cuda.cuda_kernel import CUDATemplateCaller\n candidates = []\n if config.cuda.cutlass_prescreening and len(config.cuda.cutlass_max_profiling_swizzle_options) > 1:\n candidates.extend([c for c in choices if isinstance(c, CUDATemplateCaller) if c.info_dict().get('swizzle') == '2'])\n if len(candidates) < 10:\n return []\n return candidates", + "docstring": "Add prescreening phase. Motivation is to reduce the number of autotuning needed, for example, when there are runtime params.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py", + "ast_data": "FunctionDef name:prescreen_choices arg:choices arguments arg Assign If BoolOp Compare Call Call Call Compare Call Call If Compare Call Return return:no Return return:yes" + }, + { + "library": "pytorch", + "name": "reify_object", + "source_code": "def reify_object(o, s):\n if hasattr(o, '__slots__'):\n return _reify_object_slots(o, s)\n else:\n return _reify_object_dict(o, s)", + "docstring": "Reify a Python object with a substitution >>> # xdoctest: +SKIP >>> class Foo(object): ... def __init__(self, a, b): ... self.a = a ... self.b = b ... ... def __str__(self): ... return \"Foo(%s, %s)\" % (str(self.a), str(self.b)) >>> x = var(\"x\") >>> f = Foo(1, x) >>> print(f) Foo(1, ~x) >>> print(reify_object(f, {x: 2})) Foo(1, 2)", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\more.py", + "ast_data": "FunctionDef name:reify_object arg:o arg:s arguments arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "decide_compile_threads", + "source_code": "def decide_compile_threads() -> int:\n import logging\n log = logging.getLogger(__name__)\n if 'TORCHINDUCTOR_COMPILE_THREADS' in os.environ:\n compile_threads = int(os.environ['TORCHINDUCTOR_COMPILE_THREADS'])\n log.info('compile_threads set to %d via env', compile_threads)\n elif sys.platform == 'win32':\n compile_threads = 1\n log.info('compile_threads set to 1 for win32')\n elif is_fbcode() and (not parallel_compile_enabled_internally()):\n compile_threads = 1\n log.info('compile_threads set to 1 in fbcode')\n else:\n cpu_count = len(os.sched_getaffinity(0)) if hasattr(os, 'sched_getaffinity') else os.cpu_count()\n assert cpu_count\n compile_threads = min(32, cpu_count)\n log.info('compile_threads set to %d', compile_threads)\n return compile_threads", + "docstring": "Here are the precedence to decide compile_threads 1. User can override it by TORCHINDUCTOR_COMPILE_THREADS. One may want to disable async compiling by setting this to 1 to make pdb happy. 2. Set to 1 if it's win32 platform 3. 
decide by the number of CPU cores", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\config.py", + "ast_data": "FunctionDef name:decide_compile_threads arguments Assign Call If Compare Assign Call Call If Compare Assign Call If BoolOp Call Call Assign Call Assign Call Call Call Call Assign Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "__call__", + "source_code": "def __call__(self, x, nu=0, extrapolate=None):\n if extrapolate is None:\n extrapolate = self.extrapolate\n x = np.asarray(x)\n x_shape, x_ndim = (x.shape, x.ndim)\n x = np.ascontiguousarray(x.ravel(), dtype=np.float64)\n if extrapolate == 'periodic':\n x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])\n extrapolate = False\n out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)\n self._ensure_c_contiguous()\n self._evaluate(x, nu, extrapolate, out)\n out = out.reshape(x_shape + self.c.shape[2:])\n if self.axis != 0:\n l = list(range(out.ndim))\n l = l[x_ndim:x_ndim + self.axis] + l[:x_ndim] + l[x_ndim + self.axis:]\n out = out.transpose(l)\n return out", + "docstring": "Evaluate the piecewise polynomial or its derivative. Parameters ---------- x : array_like Points to evaluate the interpolant at. nu : int, optional Order of derivative to evaluate. Must be non-negative. extrapolate : {bool, 'periodic', None}, optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. If None (default), use . Returns ------- y : array_like Interpolated values. Shape is determined by replacing the interpolation axis in the original array with the shape of x. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals are considered half-open, ``.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_interpolate.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:nu arg:extrapolate arguments arg arg arg arg If Compare Assign Assign Call Assign Assign Call Call If Compare Assign Assign Assign Call Call Call Call Call Assign Call If Compare Assign Call Call Assign Assign Call Return return:yes" + }, + { + "library": "sphinx", + "name": "_unicode_nfc", + "source_code": "def _unicode_nfc(s: str, /) -> str:\n return unicodedata.normalize('NFC', s)", + "docstring": "Normalise the string to NFC form.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\matching.py", + "ast_data": "FunctionDef name:_unicode_nfc arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "mean_squared_logarithmic_error", + "source_code": "@dispatch.add_dispatch_support\ndef mean_squared_logarithmic_error(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n first_log = math_ops.log(backend.maximum(y_pred, backend.epsilon()) + 1.0)\n second_log = math_ops.log(backend.maximum(y_true, backend.epsilon()) + 1.0)\n return backend.mean(math_ops.squared_difference(first_log, second_log), axis=-1)", + "docstring": "Computes the mean squared logarithmic error between and . 
Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_squared_logarithmic_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_true = np.maximum(y_true, 1e-7) >>> y_pred = np.maximum(y_pred, 1e-7) >>> assert np.allclose( ... loss.numpy(), ... np.mean( ... np.square(np.log(y_true + 1.) - np.log(y_pred + 1.)), axis=-1)) Args: y_true: Ground truth values. shape = . y_pred: The predicted values. shape = . Returns: Mean squared logarithmic error values. shape = .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:mean_squared_logarithmic_error arg:y_true arg:y_pred arguments arg arg Assign Call Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "is_numeric_tensor", + "source_code": "@tf_export('debugging.is_numeric_tensor', v1=['debugging.is_numeric_tensor', 'is_numeric_tensor'])\n@deprecation.deprecated_endpoints('is_numeric_tensor')\ndef is_numeric_tensor(tensor):\n return isinstance(tensor, tensor_lib.Tensor) and tensor.dtype in NUMERIC_TYPES", + "docstring": "Returns if the elements of are numbers. Specifically, returns if the dtype of is one of the following: * * * * * * * * * * * * * * * * * * * Returns if is of a non-numeric type or if is not a object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py", + "ast_data": "FunctionDef name:is_numeric_tensor arg:tensor arguments arg Return return:yes BoolOp Call Compare Call Call" + }, + { + "library": "tensorflow", + "name": "parse_tensor_name_with_slicing", + "source_code": "def parse_tensor_name_with_slicing(in_str):\n if in_str.count('[') == 1 and in_str.endswith(']'):\n tensor_name = in_str[:in_str.index('[')]\n tensor_slicing = in_str[in_str.index('['):]\n else:\n tensor_name = in_str\n tensor_slicing = ''\n return (tensor_name, tensor_slicing)", + "docstring": "Parse tensor name, potentially suffixed by slicing string. Args: in_str: (str) Input name of the tensor, potentially followed by a slicing string. E.g.: Without slicing string: \"hidden/weights/Variable:0\", with slicing string: \"hidden/weights/Variable:0[1, :]\" Returns: (str) name of the tensor (str) slicing string, if any. If no slicing string is present, return \"\".", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py", + "ast_data": "FunctionDef name:parse_tensor_name_with_slicing arg:in_str arguments arg If BoolOp Compare Call Call Assign Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "NnapiInterfaceWrapper", + "source_code": "class NnapiInterfaceWrapper(torch.nn.Module):\n\n def __init__(self, mod):\n super().__init__()\n self.mod = mod", + "docstring": "NNAPI list-ifying and de-list-ifying wrapper. NNAPI always expects a list of inputs and provides a list of outputs. This module allows us to accept inputs as separate arguments. 
It returns results as either a single tensor or tuple, matching the original module.", + "type": "class", + "file_path": "pytorch\\torch\\backends\\_nnapi\\prepare.py", + "ast_data": "ClassDef name:NnapiInterfaceWrapper FunctionDef name:__init__ arg:self arg:mod arguments arg arg Call Call Assign" + }, + { + "library": "pytorch", + "name": "DataChunkDF", + "source_code": "class DataChunkDF(DataChunk):\n\n def __iter__(self) -> Iterator[Any]:\n for df in self.items:\n yield from df_wrapper.iterate(df)\n\n def __len__(self) -> int:\n total_len = 0\n for df in self.items:\n total_len += df_wrapper.get_len(df)\n return total_len", + "docstring": "DataChunkDF iterating over individual items inside of DataFrame containers, to access DataFrames user .", + "type": "class", + "file_path": "pytorch\\torch\\utils\\data\\datapipes\\dataframe\\structures.py", + "ast_data": "ClassDef name:DataChunkDF FunctionDef name:__iter__ arg:self arguments arg For Call FunctionDef name:__len__ arg:self arguments arg Assign For Call Return return:yes" + }, + { + "library": "scrapy", + "name": "get", + "source_code": "def get(self, name: _SettingsKeyT, default: Any=None) -> Any:\n return self[name] if self[name] is not None else default", + "docstring": "Get a setting value without affecting its original type. :param name: the setting name :type name: str :param default: the value to return if no setting is found :type default: object", + "type": "method", + "file_path": "scrapy\\scrapy\\settings\\__init__.py", + "ast_data": "FunctionDef name:get arg:self arg:name arg:default arguments arg arg arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "binary_accuracy", + "source_code": "@dispatch.add_dispatch_support\ndef binary_accuracy(y_true, y_pred, threshold=0.5):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n threshold = math_ops.cast(threshold, y_pred.dtype)\n y_pred = math_ops.cast(y_pred > threshold, y_pred.dtype)\n return backend.mean(math_ops.equal(y_true, y_pred), axis=-1)", + "docstring": "Calculates how often predictions match binary labels. Standalone usage: >>> y_true = [[1], [1], [0], [0]] >>> y_pred = [[1], [1], [0], [0]] >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred) >>> assert m.shape == (4,) >>> m.numpy() array([1., 1., 1., 1.], dtype=float32) Args: y_true: Ground truth values. shape = . y_pred: The predicted values. shape = . threshold: (Optional) Float representing the threshold for deciding whether prediction values are 1 or 0. Returns: Binary accuracy values. 
shape =", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "FunctionDef name:binary_accuracy arg:y_true arg:y_pred arg:threshold arguments arg arg arg Assign Call Assign Call Assign Call Compare Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "check_same_device", + "source_code": "def check_same_device(*args, allow_cpu_scalar_tensors):\n if len(args) <= 1:\n return\n device = None\n for arg in args:\n if isinstance(arg, Number):\n continue\n elif isinstance(arg, TensorLike):\n if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):\n continue\n if device is None:\n device = arg.device\n if device != arg.device:\n msg = 'Tensor on device ' + str(arg.device) + ' is not on the expected device ' + str(device) + '!'\n raise RuntimeError(msg)\n else:\n msg = 'Unexpected type when checking for same device, ' + str(type(arg)) + '!'\n raise RuntimeError(msg)", + "docstring": "Checks that all Tensors in args have the same device. Raises a RuntimeError when: - args contains an object whose type is not Tensor or Number - two Tensor objects in args have different devices, unless one is a CPU scalar tensor and allow_cpu_scalar_tensors is True", + "type": "function", + "file_path": "pytorch\\torch\\_prims_common\\__init__.py", + "ast_data": "FunctionDef name:check_same_device arguments arg arg If Compare Call Return return:no Assign For If Call If Call If BoolOp Call If Compare Assign If Compare Assign Call Call Raise Call Assign Call Call Raise Call" + }, + { + "library": "pytorch", + "name": "torch_name", + "source_code": "def torch_name(self) -> TorchName:\n return _SCALAR_TYPE_TO_TORCH_NAME[self]", + "docstring": "Convert a JitScalarType to a torch type name.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_type_utils.py", + "ast_data": "FunctionDef name:torch_name arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, debug_dump, config):\n self._debug_dump = debug_dump\n self._evaluator = evaluator.ExpressionEvaluator(self._debug_dump)\n self._tensor_filters = {}\n self._build_argument_parsers(config)\n config.set_callback('graph_recursion_depth', self._build_argument_parsers)", + "docstring": "DebugAnalyzer constructor. Args: debug_dump: A DebugDumpDir object. config: A object that carries user-facing configurations.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:debug_dump arg:config arguments arg arg arg Assign Assign Call Assign Call Call" + }, + { + "library": "authlib", + "name": "generate_token", + "source_code": "def generate_token(self, grant_type, client, user=None, scope=None, expires_in=None, include_refresh_token=True):\n func = self._token_generators.get(grant_type)\n if not func:\n func = self._token_generators.get('default')\n if not func:\n raise RuntimeError('No configured token generator')\n return func(grant_type=grant_type, client=client, user=user, scope=scope, expires_in=expires_in, include_refresh_token=include_refresh_token)", + "docstring": "Generate the token dict. :param grant_type: current requested grant_type. :param client: the client that making the request. :param user: current authorized user. :param expires_in: if provided, use this value as expires_in. :param scope: current requested scope. :param include_refresh_token: should refresh_token be included. 
:return: Token dict", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py", + "ast_data": "FunctionDef name:generate_token arg:self arg:grant_type arg:client arg:user arg:scope arg:expires_in arg:include_refresh_token arguments arg arg arg arg arg arg arg Assign Call If Assign Call If Raise Call Return return:yes Call" + }, + { + "library": "scrapy", + "name": "from_content_type", + "source_code": "def from_content_type(self, content_type: str | bytes, content_encoding: bytes | None=None) -> type[Response]:\n if content_encoding:\n return Response\n mimetype = to_unicode(content_type, encoding='latin-1').split(';')[0].strip().lower()\n return self.from_mimetype(mimetype)", + "docstring": "Return the most appropriate Response class from an HTTP Content-Type header", + "type": "method", + "file_path": "scrapy\\scrapy\\responsetypes.py", + "ast_data": "FunctionDef name:from_content_type arg:self arg:content_type arg:content_encoding arguments arg arg arg If Return return:yes Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "compute_buffer_groups", + "source_code": "def compute_buffer_groups(self, lines):\n name_to_group = {}\n for line in lines:\n if isinstance(line, AllocateLine):\n name = line.node.get_name()\n assert name not in name_to_group\n name_to_group[name] = BufferGroup(line.node)\n elif isinstance(line, ReuseLine):\n old_name = line.node.get_name()\n new_name = line.reused_as.get_name()\n assert new_name not in name_to_group\n if old_name in name_to_group:\n name_to_group[old_name].names.append(new_name)\n name_to_group[new_name] = name_to_group[old_name]\n outputs = OrderedSet(V.graph.get_output_names())\n unique_groups = [*{id(g): g for g in name_to_group.values()}.values()]\n for group in unique_groups:\n group.is_output = any((x in outputs for x in group.names))\n assert self.buffer_groups is None\n self.buffer_groups = unique_groups\n return name_to_group", + "docstring": "Populates self.buffer_groups with BufferGroup objects that join allocations with common storage (due to inplace reuse) into a single object.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py", + "ast_data": "FunctionDef name:compute_buffer_groups arg:self arg:lines arguments arg arg Assign For If Call Assign Call Compare Assign Call If Call Assign Call Assign Call Compare If Compare Call Assign Assign Call Call Assign Call Call Call For Assign Call Compare Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "mark_as_return", + "source_code": "def mark_as_return(outputs, acd):\n\n def _mark_as_return(tensor):\n if not tensor_util.is_tf_type(tensor):\n return tensor\n return_tensor = acd.mark_as_return(tensor)\n if getattr(tensor, '_keras_mask', None) is not None:\n return_tensor._keras_mask = acd.mark_as_return(tensor._keras_mask)\n else:\n return_tensor._keras_mask = None\n if getattr(tensor, '_tfp_distribution', None) is not None:\n return_tensor._tfp_distribution = tensor._tfp_distribution\n return return_tensor\n return nest.map_structure(_mark_as_return, outputs)", + "docstring": "Marks as the return values for automatic control deps.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py", + "ast_data": "FunctionDef name:mark_as_return arg:outputs arg:acd arguments arg arg FunctionDef name:_mark_as_return arg:tensor arguments arg If Call Return return:yes Assign Call If Compare Call Assign Call 
Assign If Compare Call Assign Return return:yes Return return:yes Call" + }, + { + "library": "scipy", + "name": "PermutationTestResult", + "source_code": "@dataclass\nclass PermutationTestResult:\n statistic: float | np.ndarray\n pvalue: float | np.ndarray\n null_distribution: np.ndarray", + "docstring": "Result object returned by . Attributes ---------- statistic : float or ndarray The observed test statistic of the data. pvalue : float or ndarray The p-value for the given alternative. null_distribution : ndarray The values of the test statistic generated under the null hypothesis.", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_resampling.py", + "ast_data": "ClassDef name:PermutationTestResult" + }, + { + "library": "scikit-learn", + "name": "_check_warnings", + "source_code": "def _check_warnings(self, *, params):\n params = {} if params is None else params\n warn_params = {prop for prop, alias in self._requests.items() if alias == WARN and prop in params}\n for param in warn_params:\n warn(f'Support for {param} has recently been added to this class. To maintain backward compatibility, it is ignored now. Using `set_{self.method}_request({param}={{True, False}})` on this method of the class, you can set the request value to False to silence this warning, or to True to consume and use the metadata.')", + "docstring": "Check whether metadata is passed which is marked as WARN. If any metadata is passed which is marked as WARN, a warning is raised. Parameters ---------- params : dict The metadata passed to a method.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py", + "ast_data": "FunctionDef name:_check_warnings arg:self arguments arg arg Assign Compare Assign Call BoolOp Compare Compare For Call" + }, + { + "library": "django", + "name": "SettingsReference", + "source_code": "class SettingsReference(str):\n\n def __new__(self, value, setting_name):\n return str.__new__(self, value)\n\n def __init__(self, value, setting_name):\n self.setting_name = setting_name", + "docstring": "String subclass which references a current settings value. 
It's treated as the value in memory but serializes to a settings.NAME attribute reference.", + "type": "class", + "file_path": "django\\django\\conf\\__init__.py", + "ast_data": "ClassDef name:SettingsReference FunctionDef name:__new__ arg:self arg:value arg:setting_name arguments arg arg arg Return return:yes Call FunctionDef name:__init__ arg:self arg:value arg:setting_name arguments arg arg arg Assign" + }, + { + "library": "tensorflow", + "name": "_RemoteDataset", + "source_code": "class _RemoteDataset(dataset_ops.DatasetSource):\n\n def __init__(self, graph_def, device, element_spec):\n self._elem_spec = element_spec\n with ops.device(device):\n variant_tensor = ged_ops.dataset_from_graph(graph_def)\n super(_RemoteDataset, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._elem_spec", + "docstring": "Creates a dataset on a given given a graph def.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\distribute.py", + "ast_data": "ClassDef name:_RemoteDataset FunctionDef name:__init__ arg:self arg:graph_def arg:device arg:element_spec arguments arg arg arg arg Assign With Call Assign Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_from_inner_shape", + "source_code": "@classmethod\ndef _from_inner_shape(cls, inner_shape, dtype=None):\n return DynamicRaggedShape([], inner_shape, dtype=dtype)", + "docstring": "Create a shape from inner_shape, where num_row_partitions == 0.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:_from_inner_shape arg:cls arg:inner_shape arg:dtype arguments arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "is_registered", + "source_code": "def is_registered(self, model):\n return model in self._registry", + "docstring": "Check if a model class is registered with this .", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\sites.py", + "ast_data": "FunctionDef name:is_registered arg:self arg:model arguments arg arg Return return:yes Compare" + }, + { + "library": "matplotlib", + "name": "get_siblings", + "source_code": "def get_siblings(self, a):\n siblings = self._mapping.get(a, [a])\n return sorted(siblings, key=self._ordering.get)", + "docstring": "Return all of the items joined with *a*, including itself.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:get_siblings arg:self arg:a arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "__ne__", + "source_code": "def __ne__(self, other):\n return not_equal(self, other)", + "docstring": "Return (self != other) element-wise. 
See Also -------- not_equal", + "type": "method", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:__ne__ arg:self arg:other arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_from_tensor_list_helper", + "source_code": "def _from_tensor_list_helper(decode_fn, element_spec, tensor_list):\n flat_specs = nest.flatten(element_spec)\n flat_spec_lengths = [len(spec._flat_tensor_specs) for spec in flat_specs]\n if sum(flat_spec_lengths) != len(tensor_list):\n raise ValueError('Expected {} tensors but got {}.'.format(sum(flat_spec_lengths), len(tensor_list)))\n i = 0\n flat_ret = []\n for component_spec, num_flat_values in zip(flat_specs, flat_spec_lengths):\n value = tensor_list[i:i + num_flat_values]\n flat_ret.append(decode_fn(component_spec, value))\n i += num_flat_values\n return nest.pack_sequence_as(element_spec, flat_ret)", + "docstring": "Returns an element constructed from the given spec and tensor list. Args: decode_fn: Method that constructs an element component from the element spec component and a tensor list. element_spec: A nested structure of objects representing to element type specification. tensor_list: A list of tensors to use for constructing the value. Returns: An element constructed from the given spec and tensor list. Raises: ValueError: If the number of tensors needed to construct an element for the given spec does not match the given number of tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\util\\structure.py", + "ast_data": "FunctionDef name:_from_tensor_list_helper arg:decode_fn arg:element_spec arg:tensor_list arguments arg arg arg Assign Call Assign Call If Compare Call Call Raise Call Call Call Call Assign Assign For Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_decorator", + "source_code": "def _decorator(func):\n opname = func.__name__\n cap_sym_name = sym_name.capitalize()\n func.__doc__ = '\\n Assert the condition `x {sym}` holds element-wise.\\n\\n When running in graph mode, you should add a dependency on this operation\\n to ensure that it runs. Example of adding a dependency to an operation:\\n\\n ```python\\n with tf.control_dependencies([tf.debugging.{opname}(x, y)]):\\n output = tf.reduce_sum(x)\\n ```\\n\\n {sym_name} means, for every element `x[i]` of `x`, we have `x[i] {sym}`.\\n If `x` is empty this is trivially satisfied.\\n\\n Args:\\n x: Numeric `Tensor`.\\n data: The tensors to print out if the condition is False. Defaults to\\n error message and first few entries of `x`.\\n summarize: Print this many entries of each tensor.\\n message: A string to prefix to the default message.\\n name: A name for this operation (optional). Defaults to \"{opname}\".\\n\\n Returns:\\n Op that raises `InvalidArgumentError` if `x {sym}` is False.\\n @compatibility(eager)\\n returns None\\n @end_compatibility\\n\\n Raises:\\n InvalidArgumentError: if the check can be performed immediately and\\n `x {sym}` is False. The check can be performed immediately during\\n eager execution or if `x` is statically known.\\n '.format(sym=sym, sym_name=cap_sym_name, opname=opname)\n return func", + "docstring": "Generated decorator that adds the appropriate docstring to the function for symbol . 
Args: func: Function for a TensorFlow op Returns: Version of with documentation attached.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py", + "ast_data": "FunctionDef name:_decorator arg:func arguments arg Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "_check_trim", + "source_code": "def _check_trim(self, count: int, max: int, obj: list, element: str, css: str | None=None, value: str='...') -> bool:\n if count > max:\n if element == 'row':\n obj.append(self._generate_trimmed_row(max))\n else:\n obj.append(_element(element, css, value, True, attributes=''))\n return True\n return False", + "docstring": "Indicates whether to break render loops and append a trimming indicator Parameters ---------- count : int The loop count of previous visible items. max : int The allowable rendered items in the loop. obj : list The current render collection of the rendered items. element : str The type of element to append in the case a trimming indicator is needed. css : str, optional The css to add to the trimming indicator element. value : str, optional The value of the elements display if necessary. Returns ------- result : bool Whether a trimming element was required and appended.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\style_render.py", + "ast_data": "FunctionDef name:_check_trim arg:self arg:count arg:max arg:obj arg:element arg:css arg:value arguments arg arg arg arg arg arg arg If Compare If Compare Call Call Call Call Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "GenericIndexCol", + "source_code": "class GenericIndexCol(IndexCol):\n\n @property\n def is_indexed(self) -> bool:\n return False\n\n def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str) -> tuple[Index, Index]:\n assert isinstance(values, np.ndarray), type(values)\n index = RangeIndex(len(values))\n return (index, index)\n\n def set_attr(self) -> None:\n pass", + "docstring": "an index which is not represented in the data of the table", + "type": "class", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "ClassDef name:GenericIndexCol FunctionDef name:is_indexed arg:self arguments arg Return return:yes FunctionDef name:convert arg:self arg:values arg:nan_rep arg:encoding arg:errors arguments arg arg arg arg arg Call Call Assign Call Call Return return:yes FunctionDef name:set_attr arg:self arguments arg" + }, + { + "library": "pygame", + "name": "time", + "source_code": "def time():\n _check_init()\n return _pypm.Time()", + "docstring": "returns the current time in ms of the PortMidi timer pygame.midi.time(): return time The time is reset to 0, when the module is inited.", + "type": "function", + "file_path": "pygame\\src_py\\midi.py", + "ast_data": "FunctionDef name:time arguments Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "create_onnx_friendly_decomposition_table", + "source_code": "def create_onnx_friendly_decomposition_table(registry) -> dict[torch._ops.OperatorBase, Callable]:\n decomposition_table: dict[torch._ops.OperatorBase, Callable] = {}\n _ONNX_SUPPORT_OP_OVERLOADS = _create_onnx_supports_op_overload_table(registry)\n for op_overload, decomp_fn in torch._decomp.decomposition_table.items():\n if 'torch._refs' in decomp_fn.__module__ or op_overload in _ONNX_SUPPORT_OP_OVERLOADS:\n continue\n decomposition_table[op_overload] = decomp_fn\n for op_overload, decomp_fn in torch._decomp.core_aten_decompositions().items():\n 
if op_overload in _ONNX_SUPPORT_OP_OVERLOADS:\n continue\n decomposition_table[op_overload] = decomp_fn\n return decomposition_table", + "docstring": "This function creates a dictionary of op overloads and their decomposition functions for ops that do not have ONNX symbolic functions. If an op already has an ONNX symbolic function, its decomposition function is excluded from the table. The decomposition table is a subset of PyTorch's built-in aten-to-aten decomposition. Args: registry: The ONNX registry for PyTorch. Returns: Dict[torch._ops.OperatorBase, Callable]: A dictionary that maps op overloads to their corresponding decomposition functions.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\decomposition_table.py", + "ast_data": "FunctionDef name:create_onnx_friendly_decomposition_table arg:registry arguments arg Assign Call For Call If BoolOp Compare Compare Assign For Call Call If Compare Assign Return return:yes" + }, + { + "library": "sphinx", + "name": "get_relative_uri", + "source_code": "def get_relative_uri(self, from_: str, to: str, typ: str | None=None) -> str:\n return relative_uri(self.get_target_uri(from_), self.get_target_uri(to, typ))", + "docstring": "Return a relative URI between two source filenames. :raises: :exc: if there's no way to return a sensible URI.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\__init__.py", + "ast_data": "FunctionDef name:get_relative_uri arg:self arg:from_ arg:to arg:typ arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "score_samples", + "source_code": "def score_samples(self, X):\n score_samples = self.decision_function(X) + self.offset_\n return score_samples", + "docstring": "Raw scoring function of the samples. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Testing data. Returns ------- score_samples : array-like, shape (n_samples,) Unshiffted scoring function values of the samples.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py", + "ast_data": "FunctionDef name:score_samples arg:self arg:X arguments arg arg Assign Call Return return:yes" + }, + { + "library": "authlib", + "name": "delete_client", + "source_code": "def delete_client(self, client, request):\n raise NotImplementedError()", + "docstring": "Delete authorization code from database or cache. Developers MUST implement it in subclass, e.g.:: def delete_client(self, client, request): client.delete() :param client: the instance of OAuth client :param request: formatted request instance", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7592\\endpoint.py", + "ast_data": "FunctionDef name:delete_client arg:self arg:client arg:request arguments arg arg arg Raise Call" + }, + { + "library": "matplotlib", + "name": "interactive", + "source_code": "def interactive(b):\n rcParams['interactive'] = b", + "docstring": "Set whether to redraw after every plotting command (e.g. ).", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\__init__.py", + "ast_data": "FunctionDef name:interactive arg:b arguments arg Assign" + }, + { + "library": "scrapy", + "name": "getint", + "source_code": "def getint(self, name: _SettingsKeyT, default: int=0) -> int:\n return int(self.get(name, default))", + "docstring": "Get a setting value as an int. 
:param name: the setting name :type name: str :param default: the value to return if no setting is found :type default: object", + "type": "method", + "file_path": "scrapy\\scrapy\\settings\\__init__.py", + "ast_data": "FunctionDef name:getint arg:self arg:name arg:default arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_log_weight_as_image", + "source_code": "def _log_weight_as_image(self, weight, weight_name, epoch):\n w_img = array_ops.squeeze(weight)\n shape = backend.int_shape(w_img)\n if len(shape) == 1:\n w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])\n elif len(shape) == 2:\n if shape[0] > shape[1]:\n w_img = array_ops.transpose(w_img)\n shape = backend.int_shape(w_img)\n w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])\n elif len(shape) == 3:\n if backend.image_data_format() == 'channels_last':\n w_img = array_ops.transpose(w_img, perm=[2, 0, 1])\n shape = backend.int_shape(w_img)\n w_img = array_ops.reshape(w_img, [shape[0], shape[1], shape[2], 1])\n shape = backend.int_shape(w_img)\n if len(shape) == 4 and shape[-1] in [1, 3, 4]:\n summary_ops_v2.image(weight_name, w_img, step=epoch)", + "docstring": "Logs a weight as a TensorBoard image.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:_log_weight_as_image arg:self arg:weight arg:weight_name arg:epoch arguments arg arg arg arg Assign Call Assign Call If Compare Call Assign Call If Compare Call If Compare Assign Call Assign Call Assign Call If Compare Call If Compare Call Assign Call Assign Call Assign Call Assign Call If BoolOp Compare Call Compare Call" + }, + { + "library": "pytorch", + "name": "search_combination", + "source_code": "def search_combination(transfer_rate_bytes_per_sec, node_to_latency_mapping) -> bool:\n partition_to_latency_mapping = get_partition_to_latency_mapping(self.partitions, node_to_latency_mapping)\n cost = get_latency_of_partitioned_graph(self.partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec)\n if len(self.partitions) == 1:\n return False\n partition_pair: list[int] = []\n for i in range(len(self.partitions) - 1):\n for j in range(i + 1, len(self.partitions)):\n new_cost = try_combining_partitions(i, j, self.partitions[:])\n if new_cost <= cost:\n partition_pair = [i, j]\n cost = new_cost\n reorganize_partitions(self.partitions)\n if len(partition_pair) != 0:\n p0 = self.partitions[partition_pair[0]]\n p1 = self.partitions[partition_pair[1]]\n combine_two_partitions(p0, p1, self.partitions)\n get_bfs_level_partition(self.partitions)\n reset_partition_device(self.partitions)\n get_device_to_partitions_mapping(self.partitions, self.devices)\n return len(partition_pair) != 0", + "docstring": "Given transfer rate between partitions and each node's latency, find two partitions to combine so the cost of the partitions can be reduced. The algorithm is : 1. Go through all the partition pairs and see if any pair of partitions can be combined. 2. Calculate the cost after the combination. 3. 
Select the minimum cost and combine its corresponding partition pair.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py", + "ast_data": "FunctionDef name:search_combination arg:transfer_rate_bytes_per_sec arg:node_to_latency_mapping arguments arg arg Assign Call Assign Call If Compare Call Return return:yes For Call Call For Call Call Assign Call If Compare Assign Assign Call If Compare Call Assign Assign Call Call Call Call Return return:yes Compare Call" + }, + { + "library": "scikit-learn", + "name": "__getitem__", + "source_code": "def __getitem__(self, index):\n return self.estimators_[index]", + "docstring": "Return the index'th estimator in the ensemble.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg Return return:yes" + }, + { + "library": "kornia", + "name": "_handle_invalid_range", + "source_code": "def _handle_invalid_range(msg: Optional[str], raises: bool, min_val: float | Tensor, max_val: float | Tensor) -> bool:\n err_msg = f'Invalid image value range. Expect [0, 1] but got [{min_val}, {max_val}].'\n if msg is not None:\n err_msg += f'\\n{msg}'\n if raises:\n raise ValueError(err_msg)\n return False", + "docstring": "Helper function to handle invalid range cases.", + "type": "function", + "file_path": "kornia\\kornia\\core\\check.py", + "ast_data": "FunctionDef name:_handle_invalid_range arg:msg arg:raises arg:min_val arg:max_val arguments arg arg arg arg Assign If Compare If Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "__create_write_items__", + "source_code": "def __create_write_items__(self, fqn: str, object: Any) -> list[WriteItem]:\n return [WriteItem(index=MetadataIndex(fqn, chunks.offsets), type=WriteItemType.SHARD, tensor_data=TensorWriteData(chunk=ChunkStorageMetadata(offsets=chunks.offsets, sizes=chunks.sizes), properties=self._storage_meta.properties, size=object.size())) for tensor, chunks in zip(self.local_shards(), self.local_chunks)]", + "docstring": "For compatibility with DCP, we support creation of WriteItems such that they can be saved properly.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_shards_wrapper.py", + "ast_data": "FunctionDef name:__create_write_items__ arg:self arg:fqn arg:object arguments arg arg arg Return return:yes Call Call Call Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "def fit(self, X, y=None, sample_weight=None):\n super().fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight)\n self.offset_ = -self._intercept_\n return self", + "docstring": "Detect the soft boundary of the set of samples X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Set of samples, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None Per-sample weights. Rescale C per sample. Higher weights force the classifier to put more emphasis on these points. Returns ------- self : object Fitted estimator. 
Notes ----- If X is not a C-ordered contiguous array it is copied.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\svm\\_classes.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Call Call Call Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "k_min", + "source_code": "@property\ndef k_min(self) -> int:\n return self._pre_padding()[0]", + "docstring": "The smallest possible signal index of the STFT. is the index of the left-most non-zero value of the lowest slice . Since the zeroth slice is centered over the zeroth sample of the input signal, is never positive. A detailed example is provided in the :ref: section of the :ref:. See Also -------- k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., - . p_range: Determine and validate slice index range. upper_border_begin: Where post-padding effects start. ShortTimeFFT: Class this property belongs to.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_short_time_fft.py", + "ast_data": "FunctionDef name:k_min arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_block_orth", + "source_code": "def _block_orth(self, p1, p2):\n if p1.shape.as_list() != p2.shape.as_list():\n raise ValueError(f'The dimension of the matrices must be the same. Received p1.shape={p1.shape} and p2.shape={p2.shape}.')\n n = p1.shape.as_list()[0]\n kernel2x2 = {}\n eye = linalg_ops_impl.eye(n, dtype=self.dtype)\n kernel2x2[0, 0] = math_ops.matmul(p1, p2)\n kernel2x2[0, 1] = math_ops.matmul(p1, eye - p2)\n kernel2x2[1, 0] = math_ops.matmul(eye - p1, p2)\n kernel2x2[1, 1] = math_ops.matmul(eye - p1, eye - p2)\n return kernel2x2", + "docstring": "Construct a 2 x 2 kernel. Used to construct orthgonal kernel. Args: p1: A symmetric projection matrix. p2: A symmetric projection matrix. Returns: A 2 x 2 kernel [[p1p2, p1(1-p2)], [(1-p1)p2, (1-p1)(1-p2)]]. Raises: ValueError: If the dimensions of p1 and p2 are different.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "FunctionDef name:_block_orth arg:self arg:p1 arg:p2 arguments arg arg arg If Compare Call Call Raise Call Assign Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_fused_normalize_batch_in_training", + "source_code": "def _fused_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):\n if list(reduction_axes) == [0, 1, 2]:\n normalization_axis = 3\n tf_data_format = 'NHWC'\n else:\n normalization_axis = 1\n tf_data_format = 'NCHW'\n if gamma is None:\n gamma = constant_op.constant(1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])\n if beta is None:\n beta = constant_op.constant(0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])\n return nn.fused_batch_norm(x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)", + "docstring": "Fused version of . Args: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. reduction_axes: iterable of integers, axes over which to normalize. epsilon: Fuzz factor. 
Returns: A tuple length of 3, .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:_fused_normalize_batch_in_training arg:x arg:gamma arg:beta arg:reduction_axes arg:epsilon arguments arg arg arg arg arg If Compare Call Assign Assign Assign Assign If Compare Assign Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "init_from_module_rref", + "source_code": "@staticmethod\ndef init_from_module_rref(remote_device: str, module_rref: rpc.RRef[nn.Module], _module_interface_cls: Any=None):\n remote_module = object.__new__(RemoteModule)\n enable_moving_cpu_tensors_to_cuda = remote_module._prepare_init(remote_device)\n if _module_interface_cls is not None:\n remote_module.is_scriptable = True\n remote_module._init_template(_module_interface_cls, enable_moving_cpu_tensors_to_cuda)\n else:\n remote_module.is_scriptable = False\n remote_module.generated_methods = _NON_SCRIPTABLE_REMOTE_MODULE_MODULE._generated_methods\n remote_module.module_rref = module_rref\n remote_module._install_generated_methods()\n remote_module._check_attribute_picklability()\n return remote_module", + "docstring": "Besides the constructor, a RemoteModule instance can also be initialized given a module RRef. This alternate initialization method can be particularly useful if we want to create multiple RemoteModule instances that share the same underlying module and reduce memory consumption. Moreover, this also provides a workaround for passing script RemoteModule over RPC, which is not supported. The recommended way is as follows: 1. the sender creates a RemoteModule; 2. the sender sends its `~nn.Module` call on the user-provided module on the remote side.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\nn\\api\\remote_module.py", + "ast_data": "FunctionDef name:init_from_module_rref arg:remote_device arg:module_rref arg:_module_interface_cls arguments arg arg arg Assign Call Assign Call If Compare Assign Call Assign Assign Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "add_toplevel", + "source_code": "@staticmethod\ndef add_toplevel(log_level: CompileEventLogLevel, overwrite: bool=False, **metadata: object):\n top_event = get_chromium_event_logger().get_outermost_event()\n if top_event is None:\n raise RuntimeError('No toplevel event active. Please only call this function within a dynamo_timed context.')\n CompileEventLogger.add_data(top_event, log_level, overwrite, **metadata)", + "docstring": "Syntactic sugar for logging to the toplevel event", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:add_toplevel arg:log_level arg:overwrite arguments arg arg arg Assign Call Call If Compare Raise Call Call" + }, + { + "library": "tensorflow", + "name": "starting_wall_time", + "source_code": "def starting_wall_time(self):\n return self._reader.starting_wall_time()", + "docstring": "Wall timestamp for when the debugged TensorFlow program started. 
Returns: Stating wall time as seconds since the epoch, as a .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:starting_wall_time arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "disable_tensor_equality", + "source_code": "@tf_export(v1=['disable_tensor_equality'])\ndef disable_tensor_equality():\n logging.vlog(1, 'Disabling tensor equality')\n _tensor_equality_api_usage_gauge.get_cell().set(False)\n Tensor._USE_EQUALITY = False", + "docstring": "Compare Tensors by their id and be hashable. This is a legacy behaviour of TensorFlow and is highly discouraged.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", + "ast_data": "FunctionDef name:disable_tensor_equality arguments Call Call Call Assign Call" + }, + { + "library": "django", + "name": "FieldGetDbPrepValueMixin", + "source_code": "class FieldGetDbPrepValueMixin:\n get_db_prep_lookup_value_is_iterable = False\n\n def get_db_prep_lookup(self, value, connection):\n field = getattr(self.lhs.output_field, 'target_field', None)\n get_db_prep_value = getattr(field, 'get_db_prep_value', None) or self.lhs.output_field.get_db_prep_value\n if not self.get_db_prep_lookup_value_is_iterable:\n value = [value]\n return ('%s', [v if hasattr(v, 'as_sql') else get_db_prep_value(v, connection, prepared=True) for v in value])", + "docstring": "Some lookups require Field.get_db_prep_value() to be called on their inputs.", + "type": "class", + "file_path": "django\\django\\db\\models\\lookups.py", + "ast_data": "ClassDef name:FieldGetDbPrepValueMixin Assign FunctionDef name:get_db_prep_lookup arg:self arg:value arg:connection arguments arg arg arg Assign Call Assign BoolOp Call If Assign Return return:yes Call Call" + }, + { + "library": "django", + "name": "intersection", + "source_code": "def intersection(self, other):\n return self._topology(capi.geos_intersection(self.ptr, other.ptr))", + "docstring": "Return a Geometry representing the points shared by this Geometry and other.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:intersection arg:self arg:other arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "AUCCurve", + "source_code": "class AUCCurve(Enum):\n ROC = 'ROC'\n PR = 'PR'\n\n @staticmethod\n def from_str(key):\n if key in ('pr', 'PR'):\n return AUCCurve.PR\n elif key in ('roc', 'ROC'):\n return AUCCurve.ROC\n else:\n raise ValueError('Invalid AUC curve value \"%s\".' % key)", + "docstring": "Type of AUC Curve (ROC or PR).", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\metrics_utils.py", + "ast_data": "ClassDef name:AUCCurve Assign Assign FunctionDef name:from_str arg:key arguments arg If Compare Return return:yes If Compare Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "remove_end_callback", + "source_code": "def remove_end_callback(self, callback: Callable[[], None]) -> None:\n self.end_callbacks.remove(callback)", + "docstring": "Remove a registered end callback function. 
Args: - callback (Callable): The callback function to remove.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\callback.py", + "ast_data": "FunctionDef name:remove_end_callback arg:self arg:callback arguments arg arg Call" + }, + { + "library": "scipy", + "name": "cauchy_point", + "source_code": "def cauchy_point(self):\n if self._cauchy_point is None:\n g = self.jac\n Bg = self.hessp(g)\n self._cauchy_point = -(np.dot(g, g) / np.dot(g, Bg)) * g\n return self._cauchy_point", + "docstring": "The Cauchy point is minimal along the direction of steepest descent.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_trustregion_dogleg.py", + "ast_data": "FunctionDef name:cauchy_point arg:self arguments arg If Compare Assign Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_is_scalar_from_shape", + "source_code": "def _is_scalar_from_shape(shape):\n return _logical_equal(_ndims_from_shape(shape), 0)", + "docstring": "Returns if shape implies a scalar.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py", + "ast_data": "FunctionDef name:_is_scalar_from_shape arg:shape arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_config", + "source_code": "@abc.abstractmethod\ndef get_config(self):\n pass", + "docstring": "Returns the config of this loss scale.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py", + "ast_data": "FunctionDef name:get_config arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "_vector_polynomial_value", + "source_code": "def _vector_polynomial_value(poly, x, zero_power=None):\n\n def transition(curr_poly_val, x, poly_coeff):\n res = torch.addcmul(poly_coeff.unsqueeze(-1), x, curr_poly_val)\n return res\n if zero_power is None:\n zero_power = x.new_ones(1).expand(x.shape)\n return _polynomial_value(poly, x, zero_power, transition)", + "docstring": "Evaluates for the (batched) vector input . Check out function for more details.", + "type": "function", + "file_path": "pytorch\\torch\\_lobpcg.py", + "ast_data": "FunctionDef name:_vector_polynomial_value arg:poly arg:x arg:zero_power arguments arg arg arg FunctionDef name:transition arg:curr_poly_val arg:x arg:poly_coeff arguments arg arg arg Assign Call Call Return return:yes If Compare Assign Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "scale_to", + "source_code": "def scale_to(self, scaling: Literal['magnitude', 'psd']):\n if scaling not in (scaling_values := {'magnitude', 'psd'}):\n raise ValueError(f'scaling={scaling!r} not in {scaling_values}!')\n if self._scaling == scaling:\n return\n s_fac = self.fac_psd if scaling == 'psd' else self.fac_magnitude\n self._win = self._win * s_fac\n self.win.setflags(write=False)\n if self._dual_win is not None:\n self._dual_win = self._dual_win / s_fac\n self.dual_win.setflags(write=False)\n self._fac_mag, self._fac_psd = (None, None)\n self._scaling = scaling", + "docstring": "Scale window to obtain 'magnitude' or 'psd' scaling for the STFT. The window of a 'magnitude' spectrum has an integral of one, i.e., unit area for non-negative windows. This ensures that absolute the values of spectrum does not change if the length of the window changes (given the input signal is stationary). 
To represent the power spectral density ('psd') for varying length windows the area of the absolute square of the window needs to be unity. The property shows the current scaling. The properties and show the scaling factors required to scale the STFT values to a magnitude or a psd spectrum. Note that a window cannot to be scaled to be . Use to create a unitary instance. This method is called, if the initializer parameter is set. See Also -------- fac_magnitude: Scaling factor for to a magnitude spectrum. fac_psd: Scaling factor for to a power spectral density spectrum. fft_mode: Mode of utilized FFT scaling: Normalization applied to the window function. ShortTimeFFT: Class this method belongs to.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_short_time_fft.py", + "ast_data": "FunctionDef name:scale_to arg:self arg:scaling arguments arg arg If Compare Raise Call If Compare Return return:no Assign Compare Assign Call If Compare Assign Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "_run_graph", + "source_code": "def _run_graph(self, device, input_shape, variable, num_inputs, axis, grad, num_iters):\n graph = ops.Graph()\n with graph.as_default():\n outputs = build_graph(device, input_shape, variable, num_inputs, axis, grad)\n config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(optimizer_options=config_pb2.OptimizerOptions(opt_level=config_pb2.OptimizerOptions.L0)))\n with session_lib.Session(graph=graph, config=config) as session:\n variables.global_variables_initializer().run()\n _ = session.run(outputs)\n start_time = time.time()\n for _ in range(num_iters):\n _ = session.run(outputs)\n duration = time.time() - start_time\n print('%s shape:%d/%d var: %r #inputs:%d axis:%d grad:%r - %f secs - %f GB/sec' % (device, input_shape[0], input_shape[1], variable, num_inputs, axis, grad, duration / num_iters, num_inputs * input_shape[0] * input_shape[1] * 4 * 2 * 100 / (duration / num_iters) / 1000000000.0))\n name_template = 'concat_bench_{device}_input_shape_{shape}_variable_{variable}_num_inputs_{num_inputs}_axis_{axis}_grad_{grad}'\n self.report_benchmark(name=name_template.format(device=device, num_inputs=num_inputs, variable=variable, grad=grad, shape=str(input_shape).replace(' ', ''), axis=str(axis), iters=num_iters))\n return duration", + "docstring": "Run the graph and print its execution time. Args: device: string, the device to run on. input_shape: shape of the input tensors. variable: whether or not the input shape should be fixed num_inputs: the number of inputs to concat axis: axis to be concat'ed grad: if True compute the gradient num_iters: number of steps to run. Returns: The duration of the run in seconds.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\concat_benchmark.py", + "ast_data": "FunctionDef name:_run_graph arg:self arg:device arg:input_shape arg:variable arg:num_inputs arg:axis arg:grad arg:num_iters arguments arg arg arg arg arg arg arg arg Assign Call With Call Assign Call Assign Call Call Call With Call Call Call Assign Call Assign Call For Call Assign Call Assign Call Call Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "cryptography", + "name": "_modinv", + "source_code": "def _modinv(e: int, m: int) -> int:\n x1, x2 = (1, 0)\n a, b = (e, m)\n while b > 0:\n q, r = divmod(a, b)\n xn = x1 - q * x2\n a, b, x1, x2 = (b, r, x2, xn)\n return x1 % m", + "docstring": "Modular Multiplicative Inverse. 
Returns x such that: (x*e) mod m == 1", + "type": "function", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py", + "ast_data": "FunctionDef name:_modinv arg:e arg:m arguments arg arg Assign Assign While Compare Assign Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_benchmarkFeed", + "source_code": "def _benchmarkFeed(self, name, target, size, iters):\n feed_val = np.random.rand(size).astype(np.float32)\n times = []\n with ops.Graph().as_default():\n p = array_ops.placeholder(dtypes.float32, shape=[size])\n no_op = array_ops.identity(p).op\n with session.Session(target) as sess:\n sess.run(no_op, feed_dict={p: feed_val})\n for _ in range(iters):\n start_time = time.time()\n sess.run(no_op, feed_dict={p: feed_val})\n end_time = time.time()\n times.append(end_time - start_time)\n print('%s %d %f' % (name, size, np.median(times)))\n self.report_benchmark(iters=1, wall_time=np.median(times), name=name)", + "docstring": "Runs a microbenchmark to measure the cost of feeding a tensor. Reports the median cost of feeding a tensor of * bytes. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. size: The number of floating-point numbers to be feed. iters: The number of iterations to perform.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\session_benchmark.py", + "ast_data": "FunctionDef name:_benchmarkFeed arg:self arg:name arg:target arg:size arg:iters arguments arg arg arg arg arg Assign Call Call Assign With Call Call Assign Call Assign Call With Call Call For Call Assign Call Call Assign Call Call Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "twin", + "source_code": "def twin(self, aux_trans=None, axes_class=None):\n if aux_trans is None:\n aux_trans = mtransforms.IdentityTransform()\n ax = self._add_twin_axes(axes_class, aux_transform=aux_trans, viewlim_mode='transform')\n self.axis['top', 'right'].set_visible(False)\n ax.axis['top', 'right'].set_visible(True)\n ax.axis['left', 'bottom'].set_visible(False)\n return ax", + "docstring": "Create a twin of Axes with no shared axis. While self will have ticks on the left and bottom axis, the returned axes will have ticks on the top and right axis.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\parasite_axes.py", + "ast_data": "FunctionDef name:twin arg:self arg:aux_trans arg:axes_class arguments arg arg arg If Compare Assign Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n proba = self.predict_proba(X)\n if self.n_outputs_ == 1:\n return self.classes_.take(np.argmax(proba, axis=1), axis=0)\n else:\n n_samples = proba[0].shape[0]\n class_type = self.classes_[0].dtype\n predictions = np.empty((n_samples, self.n_outputs_), dtype=class_type)\n for k in range(self.n_outputs_):\n predictions[:, k] = self.classes_[k].take(np.argmax(proba[k], axis=1), axis=0)\n return predictions", + "docstring": "Predict class for X. The predicted class of an input sample is a vote by the trees in the forest, weighted by their probability estimates. That is, the predicted class is the one with highest mean probability estimate across the trees. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``. 
Returns ------- y : ndarray of shape (n_samples,) or (n_samples, n_outputs) The predicted classes.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call If Compare Return return:yes Call Call Assign Assign Assign Call For Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "_flush", + "source_code": "def _flush(self):\n self.source._flush()\n self._stats_refresh = True", + "docstring": "Call the flush method on the Band's parent raster and force a refresh of the statistics attribute when requested the next time.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py", + "ast_data": "FunctionDef name:_flush arg:self arguments arg Call Assign" + }, + { + "library": "pytorch", + "name": "GeneratedFileCleaner", + "source_code": "class GeneratedFileCleaner:\n\n def __init__(self, keep_intermediates=False):\n self.keep_intermediates = keep_intermediates\n self.files_to_clean = set()\n self.dirs_to_clean = []\n\n def __enter__(self):\n return self\n\n def open(self, fn, *args, **kwargs):\n if not os.path.exists(fn):\n self.files_to_clean.add(os.path.abspath(fn))\n return open(fn, *args, **kwargs)\n\n def makedirs(self, dn, exist_ok=False):\n parent, n = os.path.split(dn)\n if not n:\n parent, n = os.path.split(parent)\n if parent and n and (not os.path.exists(parent)):\n self.makedirs(parent, exist_ok=True)\n if not os.path.isdir(dn) or not exist_ok:\n os.mkdir(dn)\n self.dirs_to_clean.append(os.path.abspath(dn))\n\n def __exit__(self, type, value, traceback):\n if not self.keep_intermediates:\n for f in self.files_to_clean:\n os.unlink(f)\n for d in self.dirs_to_clean[::-1]:\n os.rmdir(d)", + "docstring": "Context Manager to clean up generated files", + "type": "class", + "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py", + "ast_data": "ClassDef name:GeneratedFileCleaner FunctionDef name:__init__ arg:self arg:keep_intermediates arguments arg arg Assign Assign Call Assign FunctionDef name:__enter__ arg:self arguments arg Return return:yes FunctionDef name:open arg:self arg:fn arguments arg arg arg arg If Call Call Call Return return:yes Call FunctionDef name:makedirs arg:self arg:dn arg:exist_ok arguments arg arg arg Assign Call If Assign Call If BoolOp Call Call If BoolOp Call Call Call Call FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg If For Call For Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.interpd\ndef __init__(self, offsetbox, xy, xybox=None, xycoords='data', boxcoords=None, *, frameon=True, pad=0.4, annotation_clip=None, box_alignment=(0.5, 0.5), bboxprops=None, arrowprops=None, fontsize=None, **kwargs):\n martist.Artist.__init__(self)\n mtext._AnnotationBase.__init__(self, xy, xycoords=xycoords, annotation_clip=annotation_clip)\n self.offsetbox = offsetbox\n self.arrowprops = arrowprops.copy() if arrowprops is not None else None\n self.set_fontsize(fontsize)\n self.xybox = xybox if xybox is not None else xy\n self.boxcoords = boxcoords if boxcoords is not None else xycoords\n self._box_alignment = box_alignment\n if arrowprops is not None:\n self._arrow_relpos = self.arrowprops.pop('relpos', (0.5, 0.5))\n self.arrow_patch = FancyArrowPatch((0, 0), (1, 1), **self.arrowprops)\n else:\n self._arrow_relpos = None\n self.arrow_patch = None\n self.patch = FancyBboxPatch(xy=(0.0, 0.0), width=1.0, 
height=1.0, facecolor='w', edgecolor='k', mutation_scale=self.prop.get_size_in_points(), snap=True, visible=frameon)\n self.patch.set_boxstyle('square', pad=pad)\n if bboxprops:\n self.patch.set(**bboxprops)\n self._internal_update(kwargs)", + "docstring": "Parameters ---------- offsetbox : xy : (float, float) The point *(x, y)* to annotate. The coordinate system is determined by *xycoords*. xybox : (float, float), default: *xy* The position *(x, y)* to place the text at. The coordinate system is determined by *boxcoords*. xycoords : single or two-tuple of str or or or callable, default: 'data' The coordinate system that *xy* is given in. See the parameter *xycoords* in for a detailed description. boxcoords : single or two-tuple of str or or or callable, default: value of *xycoords* The coordinate system that *xybox* is given in. See the parameter *textcoords* in for a detailed description. frameon : bool, default: True By default, the text is surrounded by a white (accessible as the `.AnnotationBbox.FancyBboxPatch.Annotation.FancyBboxPatch.Textlegend.fontsize.Text.set_fontsizeAnnotationBbox.AnnotationBbox.set` for a list.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:offsetbox arg:xy arg:xybox arg:xycoords arg:boxcoords arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Call Call Assign Assign Compare Call Call Assign Compare Assign Compare Assign If Compare Assign Call Assign Call Assign Assign Assign Call Call Call If Call Call" + }, + { + "library": "django", + "name": "localize_tag", + "source_code": "@register.tag('localize')\ndef localize_tag(parser, token):\n use_l10n = None\n bits = list(token.split_contents())\n if len(bits) == 1:\n use_l10n = True\n elif len(bits) > 2 or bits[1] not in ('on', 'off'):\n raise TemplateSyntaxError(\"%r argument should be 'on' or 'off'\" % bits[0])\n else:\n use_l10n = bits[1] == 'on'\n nodelist = parser.parse(('endlocalize',))\n parser.delete_first_token()\n return LocalizeNode(nodelist, use_l10n)", + "docstring": "Force or prevents localization of values. Sample usage:: {% localize off %} var pi = {{ 3.1415 }}; {% endlocalize %}", + "type": "function", + "file_path": "django\\django\\templatetags\\l10n.py", + "ast_data": "FunctionDef name:localize_tag arg:parser arg:token arguments arg arg Assign Assign Call Call If Compare Call Assign If BoolOp Compare Call Compare Raise Call Assign Compare Assign Call Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "HPacker", + "source_code": "class HPacker(PackerBase):\n\n def _get_bbox_and_child_offsets(self, renderer):\n dpicor = renderer.points_to_pixels(1.0)\n pad = self.pad * dpicor\n sep = self.sep * dpicor\n bboxes = [c.get_bbox(renderer) for c in self.get_visible_children()]\n if not bboxes:\n return (Bbox.from_bounds(0, 0, 0, 0).padded(pad), [])\n (y0, y1), yoffsets = _get_aligned_offsets([bbox.intervaly for bbox in bboxes], self.height, self.align)\n width, xoffsets = _get_packed_offsets([bbox.width for bbox in bboxes], self.width, sep, self.mode)\n x0 = bboxes[0].x0\n xoffsets -= [bbox.x0 for bbox in bboxes] - x0\n return (Bbox.from_bounds(x0, y0, width, y1 - y0).padded(pad), [*zip(xoffsets, yoffsets)])", + "docstring": "HPacker packs its children horizontally, automatically adjusting their relative positions at draw time. .. 
code-block:: none +-------------------------------+ | Child 1 Child 2 Child 3 | +-------------------------------+", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "ClassDef name:HPacker FunctionDef name:_get_bbox_and_child_offsets arg:self arg:renderer arguments arg arg Assign Call Assign Assign Assign Call Call If Return return:yes Call Call Assign Call Assign Call Assign Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "solve_lookup_type", + "source_code": "def solve_lookup_type(self, lookup, summarize=False):\n lookup_splitted = lookup.split(LOOKUP_SEP)\n if self.annotations:\n annotation, expression_lookups = refs_expression(lookup_splitted, self.annotations)\n if annotation:\n expression = self.annotations[annotation]\n if summarize:\n expression = Ref(annotation, expression)\n return (expression_lookups, (), expression)\n _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())\n field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]\n if len(lookup_parts) > 1 and (not field_parts):\n raise FieldError('Invalid lookup \"%s\" for model %s\".' % (lookup, self.get_meta().model.__name__))\n return (lookup_parts, field_parts, False)", + "docstring": "Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\query.py", + "ast_data": "FunctionDef name:solve_lookup_type arg:self arg:lookup arg:summarize arguments arg arg arg Assign Call If Assign Call If Assign If Assign Call Return return:yes Assign Call Call Assign Call Call If BoolOp Compare Call Raise Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "non_null_counts", + "source_code": "@property\n@abstractmethod\ndef non_null_counts(self) -> list[int] | Series:\n pass", + "docstring": "Sequence of non-null counts for all columns or column (if series).", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:non_null_counts arg:self arguments arg" + }, + { + "library": "matplotlib", + "name": "get_label_text", + "source_code": "def get_label_text(self):\n return self.label.get_text()", + "docstring": "Get the text of the label.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_label_text arg:self arguments arg Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "resume", + "source_code": "@cherrypy.expose\ndef resume(self, namespace):\n logging.statistics.get(namespace, {})['Enabled'] = True\n raise cherrypy.HTTPRedirect('./')", + "docstring": "Resume gathering the statistics.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py", + "ast_data": "FunctionDef name:resume arg:self arg:namespace arguments arg arg Assign Call Raise Call" + }, + { + "library": "pandas", + "name": "get_dummies", + "source_code": "@forbid_nonstring_types(['bytes'])\ndef get_dummies(self, sep: str='|', dtype: NpDtype | None=None):\n from pandas.core.frame import DataFrame\n if dtype is not None and (not (is_numeric_dtype(dtype) or is_bool_dtype(dtype))):\n raise ValueError(\"Only numeric or boolean dtypes are supported for 'dtype'\")\n result, name = self._data.array._str_get_dummies(sep, dtype)\n if is_extension_array_dtype(dtype):\n return self._wrap_result(DataFrame(result, columns=name, dtype=dtype), name=name, returns_string=False)\n return self._wrap_result(result, name=name, expand=True, 
returns_string=False)", + "docstring": "Return DataFrame of dummy/indicator variables for Series. Each string in Series is split by sep and returned as a DataFrame of dummy/indicator variables. Parameters ---------- sep : str, default \"|\" String to split on. dtype : dtype, default np.int64 Data type for new columns. Only a single dtype is allowed. Returns ------- DataFrame Dummy variables corresponding to values of the Series. See Also -------- get_dummies : Convert categorical variable into dummy/indicator variables. Examples -------- >>> pd.Series([\"a|b\", \"a\", \"a|c\"]).str.get_dummies() a b c 0 1 1 0 1 1 0 0 2 1 0 1 >>> pd.Series([\"a|b\", np.nan, \"a|c\"]).str.get_dummies() a b c 0 1 1 0 1 0 0 0 2 1 0 1 >>> pd.Series([\"a|b\", np.nan, \"a|c\"]).str.get_dummies(dtype=bool) a b c 0 True True False 1 False False False 2 True False True", + "type": "method", + "file_path": "pandas\\pandas\\core\\strings\\accessor.py", + "ast_data": "FunctionDef name:get_dummies arg:self arg:sep arg:dtype arguments arg arg arg If BoolOp Compare BoolOp Call Call Raise Call Assign Call If Call Return return:yes Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_kl_dirichlet_dirichlet", + "source_code": "@kullback_leibler.RegisterKL(Dirichlet, Dirichlet)\ndef _kl_dirichlet_dirichlet(d1, d2, name=None):\n with ops.name_scope(name, 'kl_dirichlet_dirichlet', values=[d1.concentration, d2.concentration]):\n digamma_sum_d1 = math_ops.digamma(math_ops.reduce_sum(d1.concentration, axis=-1, keepdims=True))\n digamma_diff = math_ops.digamma(d1.concentration) - digamma_sum_d1\n concentration_diff = d1.concentration - d2.concentration\n return math_ops.reduce_sum(concentration_diff * digamma_diff, axis=-1) - special_math_ops.lbeta(d1.concentration) + special_math_ops.lbeta(d2.concentration)", + "docstring": "Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet. Args: d1: instance of a Dirichlet distribution object. d2: instance of a Dirichlet distribution object. name: (optional) Name to use for created operations. default is \"kl_dirichlet_dirichlet\". Returns: Batchwise KL(d1 || d2)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet.py", + "ast_data": "FunctionDef name:_kl_dirichlet_dirichlet arg:d1 arg:d2 arg:name arguments arg arg arg With Call Assign Call Call Assign Call Assign Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_tensor_name_base", + "source_code": "def _tensor_name_base(full_tensor_name):\n if full_tensor_name.startswith('^'):\n return full_tensor_name[1:]\n return full_tensor_name.split(':')[0]", + "docstring": "Removes the device assignment code from a tensor. e.g. _tensor_name_base(\"foo:3\") => \"foo\" Args: full_tensor_name: A tensor name that is annotated with a device placement (this is what tensor flow introspection gives). 
Returns: A name without any device assignment.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py", + "ast_data": "FunctionDef name:_tensor_name_base arg:full_tensor_name arguments arg If Call Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "GraphPartitionMap", + "source_code": "@dataclasses.dataclass(frozen=True)\nclass GraphPartitionMap:\n id: int\n input_index_mapping: list[Optional[int]]\n output_index_mapping: list[Optional[int]]\n constant_names: list[str]", + "docstring": "Mapping from the partition info (e.g., input/output) to the graph info", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\utils.py", + "ast_data": "ClassDef name:GraphPartitionMap Call" + }, + { + "library": "django", + "name": "sym_difference", + "source_code": "def sym_difference(self, other):\n return self._geomgen(capi.geom_sym_diff, other)", + "docstring": "Return a new geometry which is the symmetric difference of this geometry and the other.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:sym_difference arg:self arg:other arguments arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "score_samples", + "source_code": "def score_samples(self, X):\n check_is_fitted(self)\n v = validate_data(self, X, accept_sparse='csr', reset=False)\n rng = check_random_state(self.random_state)\n ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0]))\n if sp.issparse(v):\n data = -2 * v[ind] + 1\n if isinstance(data, np.matrix):\n v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)\n else:\n v_ = v + sp.csr_array((data.ravel(), ind), shape=v.shape)\n else:\n v_ = v.copy()\n v_[ind] = 1 - v_[ind]\n fe = self._free_energy(v)\n fe_ = self._free_energy(v_)\n return -v.shape[1] * np.logaddexp(0, -(fe_ - fe))", + "docstring": "Compute the pseudo-likelihood of X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Values of the visible layer. Must be all-boolean (not checked). Returns ------- pseudo_likelihood : ndarray of shape (n_samples,) Value of the pseudo-likelihood (proxy for likelihood). 
Notes ----- This method is not deterministic: it computes a quantity called the free energy on X, then on a randomly corrupted version of X, and returns the log of the logistic function of the difference.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neural_network\\_rbm.py", + "ast_data": "FunctionDef name:score_samples arg:self arg:X arguments arg arg Call Assign Call Assign Call Assign Call Call If Call Assign If Call Assign Call Call Assign Call Call Assign Call Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit_transform", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y=None):\n _check_positive_coding(method=self.fit_algorithm, positive=self.positive_code)\n method = 'lasso_' + self.fit_algorithm\n random_state = check_random_state(self.random_state)\n X = validate_data(self, X)\n if self.n_components is None:\n n_components = X.shape[1]\n else:\n n_components = self.n_components\n V, U, E, self.n_iter_ = _dict_learning(X, n_components, alpha=self.alpha, tol=self.tol, max_iter=self.max_iter, method=method, method_max_iter=self.transform_max_iter, n_jobs=self.n_jobs, code_init=self.code_init, dict_init=self.dict_init, callback=self.callback, verbose=self.verbose, random_state=random_state, return_n_iter=True, positive_dict=self.positive_dict, positive_code=self.positive_code)\n self.components_ = U\n self.error_ = E\n return V", + "docstring": "Fit the model from data in X and return the transformed data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- V : ndarray of shape (n_samples, n_components) Transformed data.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py", + "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Call Assign Assign Call Assign Call If Compare Assign Assign Assign Call Assign Assign Return return:yes Call" + }, + { + "library": "pandas", + "name": "_get_cython_vals", + "source_code": "def _get_cython_vals(self, values: np.ndarray) -> np.ndarray:\n how = self.how\n if how in ['median', 'std', 'sem', 'skew', 'kurt']:\n values = ensure_float64(values)\n elif values.dtype.kind in 'iu':\n if how in ['var', 'mean'] or (self.kind == 'transform' and self.has_dropped_na):\n values = ensure_float64(values)\n elif how in ['sum', 'ohlc', 'prod', 'cumsum', 'cumprod']:\n if values.dtype.kind == 'i':\n values = ensure_int64(values)\n else:\n values = ensure_uint64(values)\n return values", + "docstring": "Cast numeric dtypes to float64 for functions that only support that. 
Parameters ---------- values : np.ndarray Returns ------- values : np.ndarray", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\ops.py", + "ast_data": "FunctionDef name:_get_cython_vals arg:self arg:values arguments arg arg Assign If Compare Assign Call If Compare If BoolOp Compare BoolOp Compare Assign Call If Compare If Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "to_ragged_variant", + "source_code": "def to_ragged_variant(value):\n if not isinstance(value, tensor.Tensor) or value.shape.rank is None or value.shape.is_fully_defined():\n return value\n else:\n spec = to_ragged_spec(tensor.TensorSpec.from_tensor(value))\n if spec._ragged_rank > 0:\n value = ragged_tensor.RaggedTensor.from_tensor(value, ragged_rank=spec._ragged_rank)\n return spec._to_tensor_list(value)[0]", + "docstring": "Re-encode Tensors as RaggedTensors.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\ragged_batch_op.py", + "ast_data": "FunctionDef name:to_ragged_variant arg:value arguments arg If BoolOp Call Compare Call Return return:yes Assign Call Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_run_for_tpu", + "source_code": "def _run_for_tpu(self, distributed_train_function, *args, **kwargs):\n gen_check_preemption_op.check_preemption(preemption_key=PREEMPTION_KEY)\n return distributed_train_function(*args, **kwargs)", + "docstring": "PreemptionCheckpointHandler.run implementation for TPUStrategy.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py", + "ast_data": "FunctionDef name:_run_for_tpu arg:self arg:distributed_train_function arguments arg arg arg arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_apply_all_reduce", + "source_code": "def _apply_all_reduce(reduction, tensors):\n if not tensors:\n raise ValueError('Must pass >0 tensors to all reduce operations')\n shared_name = _get_shared_name()\n\n def _all_reduce():\n res = []\n for t in tensors:\n _check_device(t)\n with ops.device(t.device):\n res.append(gen_nccl_ops.nccl_all_reduce(input=t, reduction=reduction, num_devices=len(tensors), shared_name=shared_name))\n return res\n if context.executing_eagerly():\n return def_function.function(_all_reduce)()\n else:\n return _all_reduce()", + "docstring": "Helper function for all_* functions.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nccl_ops.py", + "ast_data": "FunctionDef name:_apply_all_reduce arg:reduction arg:tensors arguments arg arg If Raise Call Assign Call FunctionDef name:_all_reduce arguments Assign For Call With Call Call Call Call Return return:yes If Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_decompose_indices", + "source_code": "def _decompose_indices(self, indices):\n if indices.shape.rank != 1:\n raise ValueError(f'ShardedVariable: indices must be 1D Tensor for sparse operations. 
Received shape: {indices.shape}')\n base = self._shape[0] // len(self._variables)\n extra = self._shape[0] % len(self._variables)\n expect_first_dim = [base] * len(self._variables)\n for i in range(extra):\n expect_first_dim[i] = expect_first_dim[i] + 1\n actual_first_dim = [v.shape.as_list()[0] for v in self._variables]\n if expect_first_dim != actual_first_dim:\n raise NotImplementedError('scatter_xxx ops are not supported in ShardedVariable that does not conform to \"div\" sharding')\n partition_assignments = math_ops.maximum(indices // (base + 1), (indices - extra) // base)\n local_indices = array_ops.where(partition_assignments < extra, indices % (base + 1), (indices - extra) % base)\n partition_assignments = math_ops.cast(partition_assignments, dtypes.int32)\n per_var_indices = data_flow_ops.dynamic_partition(local_indices, partition_assignments, len(self._variables))\n return (per_var_indices, partition_assignments)", + "docstring": "Decompose a global 1D indices into a list of per-variable indices.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py", + "ast_data": "FunctionDef name:_decompose_indices arg:self arg:indices arguments arg arg If Compare Raise Call Assign Call Assign Call Assign Call For Call Assign Assign Call If Compare Raise Call Assign Call Assign Call Compare Assign Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "gdal_full_version", + "source_code": "def gdal_full_version():\n return _version_info(b'')", + "docstring": "Return the full GDAL version information.", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\gdal\\libgdal.py", + "ast_data": "FunctionDef name:gdal_full_version arguments Return return:yes Call" + }, + { + "library": "pandas", + "name": "sum", + "source_code": "def sum(self, axis: AxisInt=0, min_count: int=0, skipna: bool=True, *args, **kwargs) -> Scalar:\n nv.validate_sum(args, kwargs)\n valid_vals = self._valid_sp_values\n sp_sum = valid_vals.sum()\n has_na = self.sp_index.ngaps > 0 and (not self._null_fill_value)\n if has_na and (not skipna):\n return na_value_for_dtype(self.dtype.subtype, compat=False)\n if self._null_fill_value:\n if check_below_min_count(valid_vals.shape, None, min_count):\n return na_value_for_dtype(self.dtype.subtype, compat=False)\n return sp_sum\n else:\n nsparse = self.sp_index.ngaps\n if check_below_min_count(valid_vals.shape, None, min_count - nsparse):\n return na_value_for_dtype(self.dtype.subtype, compat=False)\n return sp_sum + self.fill_value * nsparse", + "docstring": "Sum of non-NA/null values Parameters ---------- axis : int, default 0 Not Used. NumPy compatibility. min_count : int, default 0 The required number of valid values to perform the summation. If fewer than `` valid values are present, the result will be the missing value indicator for subarray type. *args, **kwargs Not Used. NumPy compatibility. 
Returns ------- scalar", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py", + "ast_data": "FunctionDef name:sum arg:self arg:axis arg:min_count arg:skipna arguments arg arg arg arg arg arg Call Assign Assign Call Assign BoolOp Compare If BoolOp Return return:yes Call If If Call Return return:yes Call Return return:yes Assign If Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "hip_header_magic", + "source_code": "def hip_header_magic(input_string):\n output_string = input_string\n headers = ['hip/hip_runtime.h', 'hip/hip_runtime_api.h']\n if any((re.search(f'#include (\"{ext}\"|<{ext}>)', output_string) for ext in headers)):\n return output_string\n hasDeviceLogic: int\n hasDeviceLogic = 'hipLaunchKernelGGL' in output_string\n hasDeviceLogic += '__global__' in output_string\n hasDeviceLogic += '__shared__' in output_string\n hasDeviceLogic += RE_SYNCTHREADS.search(output_string) is not None\n if hasDeviceLogic:\n output_string = '#include \"hip/hip_runtime.h\"\\n' + input_string\n return output_string", + "docstring": "If the file makes kernel builtin calls and does not include the cuda_runtime.h header, then automatically add an #include to match the \"magic\" includes provided by NVCC. TODO: Update logic to ignore cases where the cuda_runtime.h is included by another file.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py", + "ast_data": "FunctionDef name:hip_header_magic arg:input_string arguments arg Assign Assign If Call Call Return return:yes Assign Compare Compare Compare Compare Call If Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_DerivedObserverOrFakeQuantize", + "source_code": "class _DerivedObserverOrFakeQuantize(ObserverBase):\n\n def __init__(self, dtype: torch.dtype, obs_or_fqs: list[ObserverOrFakeQuantize], derive_qparams_fn: Callable[[list[ObserverOrFakeQuantize]], tuple[Tensor, Tensor]], quant_min: Optional[int]=None, quant_max: Optional[int]=None, qscheme: Optional[torch.qscheme]=None, ch_axis: Optional[int]=None):\n super().__init__(dtype)\n self.obs_or_fqs = obs_or_fqs\n self.derive_qparams_fn = derive_qparams_fn\n self.quant_min = quant_min\n self.quant_max = quant_max\n self.qscheme = qscheme\n self.ch_axis = ch_axis\n from .utils import is_per_channel\n if is_per_channel(self.qscheme):\n assert self.ch_axis is not None, 'Must provide a valid ch_axis if qscheme is per channel'\n\n def forward(self, x: Tensor) -> Tensor:\n return x\n\n def calculate_qparams(self):\n return self.derive_qparams_fn(self.obs_or_fqs)", + "docstring": "This observer is used to describe an observer whose quantization parameters are derived from other observers", + "type": "class", + "file_path": "pytorch\\torch\\ao\\quantization\\__init__.py", + "ast_data": "ClassDef name:_DerivedObserverOrFakeQuantize FunctionDef name:__init__ arg:self arg:dtype arg:obs_or_fqs arg:derive_qparams_fn arg:quant_min arg:quant_max arg:qscheme arg:ch_axis arguments arg arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign If Call Compare FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes FunctionDef name:calculate_qparams arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "filter_with_legacy_function", + "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Dataset.filter()')\ndef filter_with_legacy_function(self, predicate) -> 'DatasetV2':\n from tensorflow.python.data.ops import 
filter_op\n return filter_op._FilterDataset(self, predicate, use_legacy_function=True)", + "docstring": "Filters this dataset according to . Note: This is an escape hatch for existing uses of that do not work with V2 functions. New uses are strongly discouraged and existing uses should migrate to as this method will be removed in V2. Args: predicate: A function mapping a (nested) structure of tensors (having shapes and types defined by and ) to a scalar tensor. Returns: Dataset: The containing the elements of this dataset for which is .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:filter_with_legacy_function arg:self arg:predicate arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "autocast", + "source_code": "class autocast(torch.amp.autocast_mode.autocast):\n\n @deprecated(\"`torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\", category=FutureWarning)\n def __init__(self, enabled: bool=True, dtype: torch.dtype=torch.float16, cache_enabled: bool=True):\n if torch._jit_internal.is_scripting():\n self._enabled = enabled\n self.device = 'cuda'\n self.fast_dtype = dtype\n return\n super().__init__('cuda', enabled=enabled, dtype=dtype, cache_enabled=cache_enabled)\n\n def __enter__(self):\n if torch._jit_internal.is_scripting():\n return self\n return super().__enter__()\n\n def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any):\n if torch._jit_internal.is_scripting():\n return\n return super().__exit__(exc_type, exc_val, exc_tb)\n\n def __call__(self, func):\n if torch._jit_internal.is_scripting():\n return func\n return super().__call__(func)", + "docstring": "See :class:. `` instead.", + "type": "class", + "file_path": "pytorch\\torch\\cuda\\amp\\autocast_mode.py", + "ast_data": "ClassDef name:autocast FunctionDef name:__init__ arg:self arg:enabled arg:dtype arg:cache_enabled arguments arg arg arg arg If Call Assign Assign Assign Return return:no Call Call Call FunctionDef name:__enter__ arg:self arguments arg If Call Return return:yes Return return:yes Call Call FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg If Call Return return:no Return return:yes Call Call FunctionDef name:__call__ arg:self arg:func arguments arg arg If Call Return return:yes Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "unregister", + "source_code": "def unregister(self, name):\n if name in self._builtin_cmaps:\n raise ValueError(f'cannot unregister {name!r} which is a builtin colormap.')\n self._cmaps.pop(name, None)", + "docstring": "Remove a colormap from the registry. You cannot remove built-in colormaps. If the named colormap is not registered, returns with no error, raises if you try to de-register a default colormap. .. warning:: Colormap names are currently a shared namespace that may be used by multiple packages. Use only if you know you have registered that name before. In particular, do not unregister just in case to clean the name before registering a new colormap. Parameters ---------- name : str The name of the colormap to be removed. 
Raises ------ ValueError If you try to remove a default built-in colormap.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\cm.py", + "ast_data": "FunctionDef name:unregister arg:self arg:name arguments arg arg If Compare Raise Call Call" + }, + { + "library": "django", + "name": "get_relations", + "source_code": "def get_relations(self, cursor, table_name):\n raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_relations() method.')", + "docstring": "Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all foreign keys in the given table.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\introspection.py", + "ast_data": "FunctionDef name:get_relations arg:self arg:cursor arg:table_name arguments arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_register_dataset", + "source_code": "def _register_dataset(service, dataset, compression, dataset_id=None) -> tensor.Tensor:\n _validate_compression(compression)\n if isinstance(service, tuple):\n protocol, address = service\n else:\n protocol, address = _parse_service(service)\n external_state_policy = dataset.options().experimental_external_state_policy\n if external_state_policy is None:\n external_state_policy = ExternalStatePolicy.WARN\n encoded_spec = None\n if context.executing_eagerly():\n encoded_spec = nested_structure_coder.encode_structure(dataset.element_spec).SerializeToString()\n if compression == COMPRESSION_AUTO or compression == COMPRESSION_SNAPPY:\n dataset = dataset.map(lambda *x: compression_ops.compress(x), num_parallel_calls=dataset_ops.AUTOTUNE)\n dataset = dataset._apply_debug_options()\n metadata = data_service_pb2.DataServiceMetadata(element_spec=encoded_spec, compression=_get_compression_proto(compression))\n return gen_experimental_dataset_ops.register_dataset_v2(dataset._variant_tensor, address=address, protocol=protocol, external_state_policy=external_state_policy.value, requested_dataset_id=dataset_id, metadata=metadata.SerializeToString())", + "docstring": "Registers a dataset with the tf.data service. This transformation is similar to , but supports additional parameters which we do not yet want to add to the public Python API. Args: service: A string or a tuple indicating how to connect to the tf.data service. If it's a string, it should be in the format , where `tf.data.DatasetNonedataset_id` is provided, it will use the specified ID. If a dataset with a matching ID already exists, no new dataset is registered. This is useful if multiple training jobs want to (re)use the same dataset for training. In this case, they can register the dataset with the same dataset ID. 
Returns: A scalar string tensor representing the dataset ID.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\data_service_ops.py", + "ast_data": "FunctionDef name:_register_dataset arg:service arg:dataset arg:compression arg:dataset_id arguments arg arg arg arg Call If Call Assign Assign Call Assign Call If Compare Assign Assign If Call Assign Call Call If BoolOp Compare Compare Assign Call arguments arg Call Assign Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_control_outputs", + "source_code": "def get_control_outputs(self, op):\n if op.graph not in self.cache:\n control_outputs = self.calc_control_outputs(op.graph)\n self.cache[op.graph] = control_outputs\n else:\n control_outputs = self.cache[op.graph]\n return control_outputs.get(op, [])", + "docstring": "Return the control outputs for a given op. Args: op: The op to fetch control outputs for. Returns: Iterable of control output ops.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py", + "ast_data": "FunctionDef name:get_control_outputs arg:self arg:op arguments arg arg If Compare Assign Call Assign Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "compile_graph", + "source_code": "def compile_graph(self, gm: GraphModule) -> Callable[..., Any]:\n return gm.forward", + "docstring": "Converts the graph module into a runnable function. The default implementation is simply an interpreter calling kernels in eager mode. Derived backends can override this to do further compilation.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py", + "ast_data": "FunctionDef name:compile_graph arg:self arg:gm arguments arg arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "_parse_figsize", + "source_code": "def _parse_figsize(figsize, dpi):\n num_parts = len(figsize)\n if num_parts == 2:\n return figsize\n elif num_parts == 3:\n x, y, unit = figsize\n if unit == 'in':\n pass\n elif unit == 'cm':\n x /= 2.54\n y /= 2.54\n elif unit == 'px':\n x /= dpi\n y /= dpi\n else:\n raise ValueError(f\"Invalid unit {unit!r} in 'figsize'; supported units are 'in', 'cm', 'px'\")\n return (x, y)\n else:\n raise ValueError(f'Invalid figsize format, expected (x, y) or (x, y, unit) but got {figsize!r}')", + "docstring": "Convert a figsize expression to (width, height) in inches. Parameters ---------- figsize : (float, float) or (float, float, str) This can be - a tuple ``. dpi : float The dots-per-inch; used for converting 'px' to 'in'.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:_parse_figsize arg:figsize arg:dpi arguments arg arg Assign Call If Compare Return return:yes If Compare Assign If Compare If Compare If Compare Raise Call Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, num_packs=1):\n if num_packs <= 0:\n raise ValueError('num_packs must be greater than zero.')\n self.num_packs = num_packs", + "docstring": "Initialize the _ConcatAndSplitPacker object. Args: num_packs: specifies the number of split packs that will be formed. 
Raises: ValueError: if num_packs is not greater than 0.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:num_packs arguments arg arg If Compare Raise Call Assign" + }, + { + "library": "scipy", + "name": "_attach_argparser_methods", + "source_code": "def _attach_argparser_methods(self):\n ns = {}\n exec(self._parse_arg_template, ns)\n for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:\n setattr(self, name, types.MethodType(ns[name], self))", + "docstring": "Generates the argument-parsing functions dynamically and attaches them to the instance. Should be called from , typically in __init__ and during unpickling (__setstate__)", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:_attach_argparser_methods arg:self arguments arg Assign Call For Call Call" + }, + { + "library": "django", + "name": "table_name_col", + "source_code": "@classmethod\ndef table_name_col(cls):\n return 'f_table_name'", + "docstring": "Return the name of the metadata column used to store the feature table name.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\models.py", + "ast_data": "FunctionDef name:table_name_col arg:cls arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, topology: Topology, core_assignment: np.ndarray):\n if not isinstance(topology, Topology):\n raise ValueError('topology must be a Topology object, got {}'.format(type(topology)))\n core_assignment = numpy_compat.np_asarray(core_assignment, dtype=np.int32)\n self._topology = topology\n if core_assignment.ndim != 3:\n raise ValueError(f'core_assignment must be a rank 3 numpy array, got shape {core_assignment.shape}')\n self._num_replicas = core_assignment.shape[0]\n self._num_cores_per_replica = core_assignment.shape[1]\n if core_assignment.shape[-1] != topology.mesh_rank:\n raise ValueError(f'core_assignment.shape[-1] must have size equal to topology rank ({topology.mesh_rank}), got core_assignment.shape={core_assignment.shape}')\n self._core_assignment = core_assignment\n self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(self._core_assignment, topology)", + "docstring": "Constructs a object. Args: topology: A object that describes the physical TPU topology. core_assignment: A logical to physical core mapping, represented as a rank 3 numpy array. See the description of the property for more details. Raises: ValueError: If is not object. 
ValueError: If is not a rank 3 numpy array.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\device_assignment.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:topology arg:core_assignment arguments arg arg arg If Call Raise Call Call Call Assign Call Assign If Compare Raise Call Assign Assign If Compare Raise Call Assign Assign Call" + }, + { + "library": "matplotlib", + "name": "_set_active_handle", + "source_code": "def _set_active_handle(self, event):\n e_idx, e_dist = self._edge_handles.closest(event.x, event.y)\n if 'move' in self._state:\n self._active_handle = 'C'\n elif e_dist > self.grab_range:\n self._active_handle = None\n if self.drag_from_anywhere and self._contains(event):\n self._active_handle = 'C'\n self._extents_on_press = self.extents\n else:\n self._active_handle = None\n return\n else:\n self._active_handle = self._edge_order[e_idx]\n self._extents_on_press = self.extents", + "docstring": "Set active handle based on the location of the mouse event.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:_set_active_handle arg:self arg:event arguments arg arg Assign Call If Compare Assign If Compare Assign If BoolOp Call Assign Assign Assign Return return:no Assign Assign" + }, + { + "library": "tensorflow", + "name": "get_config", + "source_code": "@tf_export('__internal__.eager_context.get_config', v1=[])\ndef get_config():\n return context().config", + "docstring": "Get the ConfigProto of Context. Returns: The ConfigProto of Context.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:get_config arguments Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "should_swap_XW", + "source_code": "@staticmethod\ndef should_swap_XW(bias: IRNode) -> bool:\n if bias is not None and len(bias.get_stride()) >= 2 and (bias.get_stride()[-1] in (0, 1)):\n log.debug('GEMM Layout swapped X and W -> explicit transpose')\n return True\n return False", + "docstring": "Helper method to determine whether we should do an explicit transpose by switching the order of the matmul operands. This might be neccessary when we can't otherwise arrive at the right memory layout for the given Bias operand. Note: This method is a workaround for CUDA Errors that seemingly non-deterministically occurred in practice in some CUTLASS GEMM Kernels with Linear epilogues that have a bias term. 
it might make sense to check on newer Cutlass releases whether it makes sense to keep returning True in certain cases or whether it becomes unnecessary.", "type": "method", "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py", "ast_data": "FunctionDef name:should_swap_XW arg:bias arguments arg If BoolOp Compare Compare Call Call Compare Call Call Return return:yes Return return:yes" }, { "library": "scipy", "name": "_truncate", "source_code": "def _truncate(w, needed):\n if needed:\n return w[:-1]\n else:\n return w", "docstring": "Truncate window by 1 sample if needed for DFT-even symmetry", "type": "function", "file_path": "scipy\\scipy\\signal\\windows\\_windows.py", "ast_data": "FunctionDef name:_truncate arg:w arg:needed arguments arg arg If Return return:yes Return return:yes" }, { "library": "pytorch", "name": "module", "source_code": "def module(self) -> torch.nn.Module:\n from ._unlift import _unlift_exported_program_lifted_states\n module = _unlift_exported_program_lifted_states(self)\n\n def _train(self, mode: bool=True):\n raise NotImplementedError('Calling train() is not supported yet.')\n\n def _eval(self, mode: bool=True):\n raise NotImplementedError('Calling eval() is not supported yet.')\n module.train = types.MethodType(_train, module)\n module.eval = types.MethodType(_eval, module)\n return module", "docstring": "Returns a self contained GraphModule with all the parameters/buffers inlined.", "type": "method", "file_path": "pytorch\\torch\\export\\exported_program.py", "ast_data": "FunctionDef name:module arg:self arguments arg Assign Call FunctionDef name:_train arg:self arg:mode arguments arg arg Raise Call FunctionDef name:_eval arg:self arg:mode arguments arg arg Raise Call Assign Call Assign Call Return return:yes" }, { "library": "tensorflow", "name": "reshape", "source_code": "@tf_export.tf_export('experimental.numpy.reshape', v1=[])\n@np_utils.np_doc('reshape')\ndef reshape(a, newshape, order='C'):\n if order not in {'C', 'F'}:\n raise ValueError('Unsupported order argument {}'.format(order))\n a = asarray(a)\n if isinstance(newshape, int):\n newshape = [newshape]\n if order == 'F':\n r = array_ops.transpose(array_ops.reshape(array_ops.transpose(a), newshape[::-1]))\n else:\n r = array_ops.reshape(a, newshape)\n return r", "docstring": "order argument can only be 'C' or 'F'.", "type": "function", "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py", "ast_data": "FunctionDef name:reshape arg:a arg:newshape arg:order arguments arg arg arg If Compare Raise Call Call Assign Call If Call Assign If Compare Assign Call Call Call Assign Call Return return:yes Call Call" }, { "library": "tensorflow", "name": "parse_example_spec", "source_code": "@property\ndef parse_example_spec(self):\n return {self.key: parsing_ops.VarLenFeature(self.dtype)}", "docstring": "See base class.", "type": "method", "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py", "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes Call" }, { "library": "scipy", "name": "_prepare_index_for_memoryview", "source_code": "def _prepare_index_for_memoryview(i, j, x=None):\n if i.dtype > j.dtype:\n j = j.astype(i.dtype)\n elif i.dtype < j.dtype:\n i = i.astype(j.dtype)\n if not i.flags.writeable or i.dtype not in (np.int32, np.int64):\n i = i.astype(np.intp)\n if not j.flags.writeable or j.dtype not in 
(np.int32, np.int64):\n j = j.astype(np.intp)\n if x is not None:\n if not x.flags.writeable:\n x = x.copy()\n return (i, j, x)\n else:\n return (i, j)", + "docstring": "Convert index and data arrays to form suitable for passing to the Cython fancy getset routines. The conversions are necessary since to (i) ensure the integer index arrays are in one of the accepted types, and (ii) to ensure the arrays are writable so that Cython memoryview support doesn't choke on them. Parameters ---------- i, j Index arrays x : optional Data arrays Returns ------- i, j, x Re-formatted arrays (x is omitted, if input was None)", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_lil.py", + "ast_data": "FunctionDef name:_prepare_index_for_memoryview arg:i arg:j arg:x arguments arg arg arg If Compare Assign Call If Compare Assign Call If BoolOp Compare Assign Call If BoolOp Compare Assign Call If Compare If Assign Call Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "identify_hasher", + "source_code": "def identify_hasher(encoded):\n if len(encoded) == 32 and '$' not in encoded or (len(encoded) == 37 and encoded.startswith('md5$$')):\n algorithm = 'unsalted_md5'\n elif len(encoded) == 46 and encoded.startswith('sha1$$'):\n algorithm = 'unsalted_sha1'\n else:\n algorithm = encoded.split('$', 1)[0]\n return get_hasher(algorithm)", + "docstring": "Return an instance of a loaded password hasher. Identify hasher algorithm by examining encoded hash, and call get_hasher() to return hasher. Raise ValueError if algorithm cannot be identified, or if hasher is not loaded.", + "type": "function", + "file_path": "django\\django\\contrib\\auth\\hashers.py", + "ast_data": "FunctionDef name:identify_hasher arg:encoded arguments arg If BoolOp BoolOp Compare Call Compare BoolOp Compare Call Call Assign If BoolOp Compare Call Call Assign Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "cluster_qr", + "source_code": "def cluster_qr(vectors):\n k = vectors.shape[1]\n _, _, piv = qr(vectors.T, pivoting=True)\n ut, _, v = svd(vectors[piv[:k], :].T)\n vectors = abs(np.dot(vectors, np.dot(ut, v.conj())))\n return vectors.argmax(axis=1)", + "docstring": "Find the discrete partition closest to the eigenvector embedding. This implementation was proposed in [1]_. .. versionadded:: 1.1 Parameters ---------- vectors : array-like, shape: (n_samples, n_clusters) The embedding space of the samples. Returns ------- labels : array of integers, shape: n_samples The cluster labels of vectors. References ---------- .. [1] :doi:", + "type": "function", + "file_path": "scikit-learn\\sklearn\\cluster\\_spectral.py", + "ast_data": "FunctionDef name:cluster_qr arg:vectors arguments arg Assign Assign Call Assign Call Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "L1", + "source_code": "class L1(Regularizer):\n\n def __init__(self, l1=0.01, **kwargs):\n l1 = kwargs.pop('l', l1)\n if kwargs:\n raise TypeError('Argument(s) not recognized: %s' % (kwargs,))\n l1 = 0.01 if l1 is None else l1\n _check_penalty_number(l1)\n self.l1 = backend.cast_to_floatx(l1)\n\n def __call__(self, x):\n return self.l1 * math_ops.reduce_sum(math_ops.abs(x))\n\n def get_config(self):\n return {'l1': float(self.l1)}", + "docstring": "A regularizer that applies a L1 regularization penalty. 
The L1 regularization penalty is computed as: L1 may be passed to a layer as a string identifier: >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1') In this case, the default value used is . Attributes: l1: Float; L1 regularization factor.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\regularizers.py", + "ast_data": "ClassDef name:L1 FunctionDef name:__init__ arg:self arg:l1 arguments arg arg arg Assign Call If Raise Call Assign Compare Call Assign Call FunctionDef name:__call__ arg:self arg:x arguments arg arg Return return:yes Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "next", + "source_code": "def next(self):\n return self.__next__()", + "docstring": "Return the next line of bytes.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpreqbody.py", + "ast_data": "FunctionDef name:next arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "construct_array_type", + "source_code": "@classmethod\ndef construct_array_type(cls) -> type_t[DatetimeArray]:\n from pandas.core.arrays import DatetimeArray\n return DatetimeArray", + "docstring": "Return the array type associated with this dtype. Returns ------- type", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", + "ast_data": "FunctionDef name:construct_array_type arg:cls arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "flatten", + "source_code": "def flatten(line):\n for element in line:\n if iterable_not_string(element):\n yield from flatten(element)\n else:\n yield element", + "docstring": "Flatten an arbitrarily nested sequence. Parameters ---------- line : sequence The non string sequence to flatten Notes ----- This doesn't consider strings sequences. Returns ------- flattened : generator", + "type": "function", + "file_path": "pandas\\pandas\\core\\common.py", + "ast_data": "FunctionDef name:flatten arg:line arguments arg For If Call Call" + }, + { + "library": "pytorch", + "name": "visualize_graph_executor", + "source_code": "def visualize_graph_executor(state, name_prefix, pb_graph, inline_graph):\n if state.autograd_fallback_graph is not None:\n visualize(graph=state.autograd_fallback_graph, name_prefix=name_prefix + 'autograd_fallback/', pb_graph=pb_graph, executors_it=iter(state.autograd_fallback.executors()))\n for i, (arg_spec, plan) in enumerate(state.execution_plans.items()):\n subgraph_name = name_prefix + f'plan{i}/'\n input_kinds = pb_graph.node.add(op='INPUT_KIND', name=subgraph_name)\n input_kinds.attr['inputs'].s = repr(arg_spec).encode('ascii')\n visualize(plan.graph, subgraph_name, pb_graph, iter(plan.code.executors()))\n if plan.grad_executor is not None:\n grad_subgraph_name = subgraph_name + 'grad/'\n visualize(plan.grad_executor, grad_subgraph_name, pb_graph)\n return inline_graph(state.graph, name_prefix + 'original/')", + "docstring": "Append the state of a given GraphExecutor to the graph protobuf. Args: state (GraphExecutor or GraphExecutorState): GraphExecutor to display. name_prefix (str): Name prefix of the containing subgraph. pb_graph (GraphDef): graph to append to. inline_graph (Callable): a function that handles setting up a value_map, so that some graphs in here can be inlined. This is necessary, because this will simply be for the top-level GraphExecutor, or for all nested ones. The signature should look like (Graph, name_prefix) -> (). It will be called exactly once. 
The strategy is to embed all different configurations as independent subgraphs, while inlining the original graph as the one that actually produces the values.", + "type": "function", + "file_path": "pytorch\\torch\\contrib\\_tensorboard_vis.py", + "ast_data": "FunctionDef name:visualize_graph_executor arg:state arg:name_prefix arg:pb_graph arg:inline_graph arguments arg arg arg arg If Compare Call Call Call For Call Call Assign Assign Call Assign Call Call Call Call Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "activation_is_int32_quantized", + "source_code": "def activation_is_int32_quantized(qconfig):\n return activation_dtype(qconfig) in [torch.qint32, torch.int32]", + "docstring": "Given a qconfig, decide if the activation needs to be quantized to int32 or not", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\utils.py", + "ast_data": "FunctionDef name:activation_is_int32_quantized arg:qconfig arguments arg Return return:yes Compare Call" + }, + { + "library": "scipy", + "name": "right_censored", + "source_code": "@classmethod\ndef right_censored(cls, x, censored):\n x, censored = _validate_x_censored(x, censored)\n return cls(uncensored=x[~censored], right=x[censored])", + "docstring": "Create a instance of right-censored data. Parameters ---------- x : array_like is the array of observed data or measurements. must be a one-dimensional sequence of finite numbers. censored : array_like of bool must be a one-dimensional sequence of boolean values. If `xCensoredDataCensoredData` that represents the collection of uncensored and right-censored values. Examples -------- >>> from scipy.stats import CensoredData Two uncensored values (4 and 10) and two right-censored values (24 and 25). >>> data = CensoredData.right_censored([4, 10, 24, 25], ... 
[False, False, True, True]) >>> data CensoredData(uncensored=array([ 4., 10.]), left=array([], dtype=float64), right=array([24., 25.]), interval=array([], shape=(0, 2), dtype=float64)) >>> print(data) CensoredData(4 values: 2 not censored, 2 right-censored)", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_censored_data.py", + "ast_data": "FunctionDef name:right_censored arg:cls arg:x arg:censored arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "seaborn", + "name": "_determine_axis_sharing", + "source_code": "def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n axis_to_dim = {'x': 'col', 'y': 'row'}\n key: str\n val: str | bool\n for axis in 'xy':\n key = f'share{axis}'\n if key not in self.subplot_spec:\n if axis in pair_spec.get('structure', {}):\n if self.wrap is None and pair_spec.get('cross', True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n val = True\n self.subplot_spec[key] = val", + "docstring": "Update subplot spec with default or specified axis sharing parameters.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\subplots.py", + "ast_data": "FunctionDef name:_determine_axis_sharing arg:self arg:pair_spec arguments arg arg Assign For Assign If Compare If Compare Call If BoolOp Compare Call Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "compute_global_tensor_info", + "source_code": "def compute_global_tensor_info(tensor: torch.Tensor, mesh: DeviceMesh, placements: Sequence[Placement]) -> tuple[list[int], list[int]]:\n tensor_shape = list(tensor.size())\n tensor_stride = list(tensor.stride())\n for idx, placement in enumerate(placements):\n mesh_dim_size = mesh.size(idx)\n if placement.is_shard():\n shard_placement = cast(Shard, placement)\n if shard_placement.dim < 0:\n raise AssertionError(f'Shard placements should have negative dims normalized in the user-facing APIs: {shard_placement}')\n shard_dim = shard_placement.dim\n assert shard_dim < tensor.ndim, f'Sharding dim {shard_dim} greater than tensor ndim {tensor.ndim} for placement number {idx}.'\n local_dim_size = tensor_shape[shard_dim]\n tensor_shape[shard_dim] = local_dim_size * mesh_dim_size\n for i in range(len(tensor_stride)):\n if i != shard_dim and tensor_stride[i] >= tensor_stride[shard_dim]:\n tensor_stride[i] = tensor_stride[i] * mesh_dim_size\n elif not isinstance(placement, (Replicate, Partial)):\n raise RuntimeError(f'placement type {type(placement)} not supported!')\n return (tensor_shape, tensor_stride)", + "docstring": "Compute the global size and stride of a DTensor from the given local tensor. The local size is multiplited by per Sharding dim. The local stride is multiplited by per Sharding dim, as long as the dimension is outside sharding dim. For example, if we have a local tensor with size (4, 8, 2) and stride (16, 1, 8). If the DTensor placements are [Shard(2)] and world_size is 2; then the global size is (4, 8, 4) and stride is (16 * 2, 1, 8). Args: tensor (:class:): Local tensor which DTensor will be constructed from. mesh (:class:): Object which describes the mesh topology of devices for the DTensor. placements (Sequence[:class:]]): The attribute of the DTensor that describes its layout on the mesh topology. Return: tensor_shape: A List of int which specifies the size of DTensor which build on top of the local tensor. 
tensor_stride: A List of int which specifies the stride of DTensor.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\_utils.py", + "ast_data": "FunctionDef name:compute_global_tensor_info arg:tensor arg:mesh arg:placements arguments arg arg arg Assign Call Call Assign Call Call For Call Assign Call If Call Assign Call If Compare Raise Call Assign Compare Assign Assign For Call Call If BoolOp Compare Compare Assign If Call Raise Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_clean_triton", + "source_code": "def get_clean_triton(input_path: Path, output_path: Path=Path('triton_only_repro.py')):\n return process_file(str(input_path), str(output_path))", + "docstring": "Run experiments and output results to file Args: input_path (Optional[Path]): Path to inductor generated output codede output_path (Optional[Path]): Path to write out the new python file", + "type": "function", + "file_path": "pytorch\\torch\\utils\\_get_clean_triton.py", + "ast_data": "FunctionDef name:get_clean_triton arg:input_path arg:output_path arguments arg arg Call Return return:yes Call Call Call" + }, + { + "library": "seaborn", + "name": "_check_argument", + "source_code": "def _check_argument(param, options, value, prefix=False):\n if prefix and value is not None:\n failure = not any((value.startswith(p) for p in options if isinstance(p, str)))\n else:\n failure = value not in options\n if failure:\n raise ValueError(f'The value for `{param}` must be one of {options}, but {repr(value)} was passed.')\n return value", + "docstring": "Raise if value for param is not in options.", + "type": "function", + "file_path": "seaborn\\seaborn\\utils.py", + "ast_data": "FunctionDef name:_check_argument arg:param arg:options arg:value arg:prefix arguments arg arg arg arg If BoolOp Compare Assign Call Call Call Assign Compare If Raise Call Call Return return:yes" + }, + { + "library": "authlib", + "name": "validate_issuer", + "source_code": "def validate_issuer(self):\n issuer = self.get('issuer')\n if not issuer:\n raise ValueError('\"issuer\" is required')\n parsed = urlparse.urlparse(issuer)\n if not is_secure_transport(issuer):\n raise ValueError('\"issuer\" MUST use \"https\" scheme')\n if parsed.query or parsed.fragment:\n raise ValueError('\"issuer\" has no query or fragment')", + "docstring": "REQUIRED. The authorization server's issuer identifier, which is a URL that uses the \"https\" scheme and has no query or fragment components.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py", + "ast_data": "FunctionDef name:validate_issuer arg:self arguments arg Assign Call If Raise Call Assign Call If Call Raise Call If BoolOp Raise Call" + }, + { + "library": "tensorflow", + "name": "to_int64", + "source_code": "@tf_export(v1=['to_int64'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated(date=None, instructions='Use `tf.cast` instead.')\ndef to_int64(x, name='ToInt64'):\n return cast(x, dtypes.int64, name=name)", + "docstring": "Casts a tensor to type . Args: x: A or or . name: A name for the operation (optional). Returns: A or or with same shape as with type . Raises: TypeError: If cannot be cast to the . @compatibility(TF2) This name was deprecated and removed in TF2, but has an exact replacement . There are no further issues with eager execution or tf.function. 
Before: >>> tf.compat.v1.to_int64(tf.constant(1, dtype=tf.int32)) After: >>> tf.cast(tf.constant(1, dtype=tf.int32), tf.int64) @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:to_int64 arg:x arg:name arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "decision_function", + "source_code": "def decision_function(self, X):\n check_is_fitted(self)\n X = validate_data(self, X, accept_sparse=True, ensure_all_finite=False, reset=False)\n indices = self.pairwise_indices_\n if indices is None:\n Xs = [X] * len(self.estimators_)\n else:\n Xs = [X[:, idx] for idx in indices]\n predictions = np.vstack([est.predict(Xi) for est, Xi in zip(self.estimators_, Xs)]).T\n confidences = np.vstack([_predict_binary(est, Xi) for est, Xi in zip(self.estimators_, Xs)]).T\n Y = _ovr_decision_function(predictions, confidences, len(self.classes_))\n if self.n_classes_ == 2:\n return Y[:, 1]\n return Y", + "docstring": "Decision function for the OneVsOneClassifier. The decision values for the samples are computed by adding the normalized sum of pair-wise classification confidence levels to the votes in order to disambiguate between the decision values when the votes for all the classes are equal leading to a tie. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- Y : array-like of shape (n_samples, n_classes) or (n_samples,) Result of calling on the final estimator. .. versionchanged:: 0.19 output shape changed to `` to conform to scikit-learn conventions for binary classification.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\multiclass.py", + "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Call Assign Call Assign If Compare Assign Call Assign Assign Call Call Call Assign Call Call Call Assign Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, func, argnames, func_name=None, grad_func=None, python_grad_func=None, out_names=None, **kwargs):\n self._func = func\n self._argnames = argnames\n self._func_name = func_name\n assert grad_func is None or isinstance(grad_func, _OverloadedFunction)\n self._grad_func = grad_func\n self._python_grad_func = python_grad_func\n self._out_names = out_names\n self._extra_kwargs = kwargs\n self._overload = {}", + "docstring": "Creates _DefinedFunction. Args: func: A python callable which constructs a tf function body. argnames: A list of strings for function argument names. func_name: The function name. Defaults to None, in which derives from 'func'. grad_func: This function's gradient function, if not None. Defaults to None. python_grad_func: A python callable implementing the gradient of the function python-side. out_names: A list of strings for the function return value names. **kwargs: The keyword arguments. **kwargs is passed to every call site of this function. 
Raises: ValueError: The function definition is invalid.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:func arg:argnames arg:func_name arg:grad_func arg:python_grad_func arg:out_names arguments arg arg arg arg arg arg arg arg Assign Assign Assign BoolOp Compare Call Assign Assign Assign Assign Assign" + }, + { + "library": "scikit-learn", + "name": "staged_decision_function", + "source_code": "def staged_decision_function(self, X):\n yield from self._staged_raw_predict(X)", + "docstring": "Compute decision function of `classes_`.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", + "ast_data": "FunctionDef name:staged_decision_function arg:self arg:X arguments arg arg Call" + }, + { + "library": "scipy", + "name": "lagrangian_hessian", + "source_code": "def lagrangian_hessian(self, z, v):\n Hx = self.lagrangian_hessian_x(z, v)\n if self.n_ineq > 0:\n S_Hs_S = self.lagrangian_hessian_s(z, v)\n\n def matvec(vec):\n vec_x = self.get_variables(vec)\n vec_s = self.get_slack(vec)\n if self.n_ineq > 0:\n return np.hstack((Hx.dot(vec_x), S_Hs_S * vec_s))\n else:\n return Hx.dot(vec_x)\n return LinearOperator((self.n_vars + self.n_ineq, self.n_vars + self.n_ineq), matvec)", + "docstring": "Returns scaled Lagrangian Hessian", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\tr_interior_point.py", + "ast_data": "FunctionDef name:lagrangian_hessian arg:self arg:z arg:v arguments arg arg arg Assign Call If Compare Assign Call FunctionDef name:matvec arg:vec arguments arg Assign Call Assign Call If Compare Return return:yes Call Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_register_pytree_node", + "source_code": "@deprecated('`torch.utils._cxx_pytree._register_pytree_node` is deprecated. Please use `torch.utils._cxx_pytree.register_pytree_node` instead.', category=FutureWarning)\ndef _register_pytree_node(cls: type[Any], flatten_fn: FlattenFunc, unflatten_fn: UnflattenFunc, *, serialized_type_name: Optional[str]=None, to_dumpable_context: Optional[ToDumpableContextFn]=None, from_dumpable_context: Optional[FromDumpableContextFn]=None) -> None:\n _private_register_pytree_node(cls, flatten_fn, unflatten_fn, serialized_type_name=serialized_type_name, to_dumpable_context=to_dumpable_context, from_dumpable_context=from_dumpable_context)", + "docstring": "Register a container-like type as pytree node for the C++ pytree only. 
The `torch.exporttorch.export` right now.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py", + "ast_data": "FunctionDef name:_register_pytree_node arg:cls arg:flatten_fn arg:unflatten_fn arguments arg arg arg arg arg arg Call Call" + }, + { + "library": "scrapy", + "name": "handshakeCompleted", + "source_code": "def handshakeCompleted(self) -> None:\n assert self.transport is not None\n if self.transport.negotiatedProtocol is not None and self.transport.negotiatedProtocol != PROTOCOL_NAME:\n self._lose_connection_with_error([InvalidNegotiatedProtocol(self.transport.negotiatedProtocol)])", + "docstring": "Close the connection if it's not made via the expected protocol", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py", + "ast_data": "FunctionDef name:handshakeCompleted arg:self arguments arg Compare If BoolOp Compare Compare Call Call" + }, + { + "library": "django", + "name": "postgis_lib_version", + "source_code": "def postgis_lib_version(self):\n return self._get_postgis_func('postgis_lib_version')", + "docstring": "Return the version number of the PostGIS library used with PostgreSQL.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py", + "ast_data": "FunctionDef name:postgis_lib_version arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "make_layoutgrids_gs", + "source_code": "def make_layoutgrids_gs(layoutgrids, gs):\n if gs in layoutgrids or gs.figure is None:\n return layoutgrids\n layoutgrids['hasgrids'] = True\n if not hasattr(gs, '_subplot_spec'):\n parent = layoutgrids[gs.figure]\n layoutgrids[gs] = mlayoutgrid.LayoutGrid(parent=parent, parent_inner=True, name='gridspec', ncols=gs._ncols, nrows=gs._nrows, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios())\n else:\n subplot_spec = gs._subplot_spec\n parentgs = subplot_spec.get_gridspec()\n if parentgs not in layoutgrids:\n layoutgrids = make_layoutgrids_gs(layoutgrids, parentgs)\n subspeclb = layoutgrids[parentgs]\n rep = (gs, 'top')\n if rep not in layoutgrids:\n layoutgrids[rep] = mlayoutgrid.LayoutGrid(parent=subspeclb, name='top', nrows=1, ncols=1, parent_pos=(subplot_spec.rowspan, subplot_spec.colspan))\n layoutgrids[gs] = mlayoutgrid.LayoutGrid(parent=layoutgrids[rep], name='gridspec', nrows=gs._nrows, ncols=gs._ncols, width_ratios=gs.get_width_ratios(), height_ratios=gs.get_height_ratios())\n return layoutgrids", + "docstring": "Make the layoutgrid for a gridspec (and anything nested in the gridspec)", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\_constrained_layout.py", + "ast_data": "FunctionDef name:make_layoutgrids_gs arg:layoutgrids arg:gs arguments arg arg If BoolOp Compare Compare Return return:yes Assign If Call Assign Assign Call Call Call Assign Assign Call If Compare Assign Call Assign Assign If Compare Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_Underdetermined", + "source_code": "def _Underdetermined(op: ops.Operation, grad):\n a = op.inputs[0]\n b = op.inputs[1]\n l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)\n chol = linalg_ops._RegularizedGramianCholesky(a, l2_regularizer=l2_regularizer, first_kind=False)\n grad_b = linalg_ops.cholesky_solve(chol, math_ops.matmul(a, grad))\n tmp = linalg_ops.cholesky_solve(chol, b)\n a1 = math_ops.matmul(tmp, a, adjoint_a=True)\n a1 = -math_ops.matmul(grad_b, a1)\n a2 = grad - math_ops.matmul(a, grad_b, 
adjoint_a=True)\n a2 = math_ops.matmul(tmp, a2, adjoint_b=True)\n grad_a = a1 + a2\n return (grad_a, grad_b, None)", + "docstring": "Gradients for the underdetermined case of MatrixSolveLs. This is the backprop for the solution to the normal equations of the second kind: X = F(A, B) = A * (A*A^T + lambda*I)^{-1} * B that (for lambda=0) solve the least squares problem min ||X||_F subject to A*X = B.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py", + "ast_data": "FunctionDef name:_Underdetermined arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_horizontalalignment", + "source_code": "def set_horizontalalignment(self, align):\n _api.check_in_list(['center', 'right', 'left'], align=align)\n self._horizontalalignment = align\n self.stale = True", + "docstring": "Set the horizontal alignment relative to the anchor point. See also :doc:. Parameters ---------- align : {'left', 'center', 'right'}", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:set_horizontalalignment arg:self arg:align arguments arg arg Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, fetches):\n if isinstance(fetches, wrapt.ObjectProxy):\n self._fetch_type = type(fetches.__wrapped__)\n else:\n self._fetch_type = type(fetches)\n self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]\n self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)", + "docstring": "Creates a _ListFetchMapper. Args: fetches: List, tuple, or namedtuple of fetches.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:fetches arguments arg arg If Call Assign Call Assign Call Assign Call Assign Call" + }, + { + "library": "sphinx", + "name": "Acks", + "source_code": "class Acks(SphinxDirective):\n has_content = True\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = False\n option_spec: ClassVar[OptionSpec] = {}\n\n def run(self) -> list[Node]:\n children = self.parse_content_to_nodes()\n if len(children) != 1 or not isinstance(children[0], nodes.bullet_list):\n logger.warning(__('.. acks content is not a list'), location=(self.env.docname, self.lineno))\n return []\n return [addnodes.acks('', *children)]", + "docstring": "Directive for a list of names.", + "type": "class", + "file_path": "sphinx\\sphinx\\directives\\other.py", + "ast_data": "ClassDef name:Acks Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Assign Call If BoolOp Compare Call Call Call Call Return return:no Return return:yes Call" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, laf: torch.Tensor, img: torch.Tensor) -> torch.Tensor:\n KORNIA_CHECK_LAF(laf)\n KORNIA_CHECK_SHAPE(img, ['B', 'C', 'H', 'W'])\n if laf.size(0) != img.size(0):\n raise ValueError(f'Batch size of laf and img should be the same. 
Got {img.size(0)}, {laf.size(0)}')\n B, N = laf.shape[:2]\n patches: torch.Tensor = extract_patches_from_pyramid(img, laf, self.patch_size).view(-1, 1, self.patch_size, self.patch_size)\n angles_radians: torch.Tensor = self.angle_detector(patches).view(B, N)\n prev_angle = get_laf_orientation(laf).view_as(angles_radians)\n laf_out: torch.Tensor = set_laf_orientation(laf, rad2deg(angles_radians) + prev_angle)\n return laf_out", + "docstring": "Run forward. Args: laf: :math: img: :math: Returns: LAF_out: :math:", + "type": "method", + "file_path": "kornia\\kornia\\feature\\orientation.py", + "ast_data": "FunctionDef name:forward arg:self arg:laf arg:img arguments arg arg arg Call Call If Compare Call Call Raise Call Call Call Assign Call Call Call Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_validate_shapes", + "source_code": "@staticmethod\ndef _validate_shapes(t_dir, f_dir, t, f1, f2):\n names = (d + s for d, s in zip((t_dir, f_dir, f_dir), ('', '1', '2')))\n for name, array in zip(names, [t, f1, f2]):\n if array.ndim > 1:\n raise ValueError(f'{name!r} is not 1-dimensional')\n if t.size > 1 and array.size > 1 and (t.size != array.size):\n msg = '{!r} has size {}, but {!r} has an unequal size of {}'.format(t_dir, t.size, name, array.size)\n raise ValueError(msg)", + "docstring": "Validate that t, f1 and f2 are 1-dimensional and have the same length.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:_validate_shapes arg:t_dir arg:f_dir arg:t arg:f1 arg:f2 arguments arg arg arg arg arg Assign Call For Call If Compare Raise Call If BoolOp Compare Compare Compare Assign Call Raise Call" + }, + { + "library": "cherrypy", + "name": "tee", + "source_code": "def tee(body):\n if 'no-cache' in response.headers.values('Pragma') or 'no-store' in response.headers.values('Cache-Control'):\n for chunk in body:\n yield chunk\n return\n output = []\n for chunk in body:\n output.append(chunk)\n yield chunk\n body = b''.join(output)\n if not body:\n cherrypy._cache.delete()\n else:\n cherrypy._cache.put((response.status, response.headers or {}, body, response.time), len(body))", + "docstring": "Tee response.body into a list.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\caching.py", + "ast_data": "FunctionDef name:tee arg:body arguments arg If BoolOp Compare Call Compare Call For Return return:no Assign For Call Assign Call If Call Call BoolOp Call" + }, + { + "library": "cryptography", + "name": "private_bytes", + "source_code": "@abc.abstractmethod\ndef private_bytes(self, encoding: _serialization.Encoding, format: _serialization.PrivateFormat, encryption_algorithm: _serialization.KeySerializationEncryption) -> bytes:\n pass", + "docstring": "The serialized bytes of the private key.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py", + "ast_data": "FunctionDef name:private_bytes arg:self arg:encoding arg:format arg:encryption_algorithm arguments arg arg arg arg" + }, + { + "library": "tensorflow", + "name": "_global_batch_size", + "source_code": "@property\ndef _global_batch_size(self):\n return True", + "docstring": "and use global batch size. assumes per-replica batching. 
Returns: Boolean.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", + "ast_data": "FunctionDef name:_global_batch_size arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_outside", + "source_code": "def get_outside(self):\n return self._rgba_outside", + "docstring": "Get the color for out-of-range values.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:get_outside arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_merge_dicts", + "source_code": "def _merge_dicts(self, old=None, new=None):\n old = {} if old is None else old\n new = {} if new is None else new\n for k, v in new.items():\n val = old.get(k, None)\n if val is not None and val is not v:\n raise ValueError('Found different value for existing key (key:{} old_value:{} new_value:{}'.format(k, old[k], v))\n old[k] = v\n return old", + "docstring": "Helper to merge two dictionaries.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py", + "ast_data": "FunctionDef name:_merge_dicts arg:self arg:old arg:new arguments arg arg arg Assign Compare Assign Compare For Call Assign Call If BoolOp Compare Compare Raise Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_signature_defs", + "source_code": "def get_signature_defs(tflite_model):\n model = tflite_model\n if not isinstance(tflite_model, bytearray):\n model = bytearray(tflite_model)\n serialized_signature_def_map = signature_def_util.GetSignatureDefMap(model)\n\n def _deserialize(serialized):\n signature_def = meta_graph_pb2.SignatureDef()\n signature_def.ParseFromString(serialized)\n return signature_def\n return {k: _deserialize(v) for k, v in serialized_signature_def_map.items()}", + "docstring": "Get SignatureDef dict from the Metadata of a TfLite flatbuffer buffer. Args: tflite_model: TFLite model buffer to get the signature_def. Returns: dict containing serving names to SignatureDefs if exists, otherwise, empty dict. Raises: ValueError: tflite_model buffer does not contain a valid TFLite model. 
DecodeError: SignatureDef cannot be parsed from TfLite SignatureDef metadata.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\signature\\signature_def_utils.py", + "ast_data": "FunctionDef name:get_signature_defs arg:tflite_model arguments arg Assign If Call Assign Call Assign Call FunctionDef name:_deserialize arg:serialized arguments arg Assign Call Call Return return:yes Return return:yes Call Call" + }, + { + "library": "django", + "name": "chunked_cursor", + "source_code": "def chunked_cursor(self):\n return self.cursor()", + "docstring": "Return a cursor that tries to avoid caching in the database (if supported by the database), otherwise return a regular cursor.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\base.py", + "ast_data": "FunctionDef name:chunked_cursor arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "convert_to_tensor_or_sparse_tensor", + "source_code": "@tf_export(v1=['convert_to_tensor_or_sparse_tensor'])\ndef convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None):\n if dtype is not None:\n dtype = dtypes.as_dtype(dtype)\n if isinstance(value, SparseTensorValue):\n value = SparseTensor.from_value(value)\n if isinstance(value, SparseTensor):\n if dtype and (not dtype.is_compatible_with(value.dtype)):\n raise RuntimeError(f'Sparse dtype mismatch. Requested: {dtype.name}, Actual: {value.dtype.name}')\n return value\n return ops.convert_to_tensor(value, dtype=dtype, name=name)", + "docstring": "Converts value to a or . Args: value: A , , or an object whose type has a registered conversion function. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of . name: Optional name to use if a new is created. Returns: A or based on . 
Raises: RuntimeError: If result type is incompatible with .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py", + "ast_data": "FunctionDef name:convert_to_tensor_or_sparse_tensor arg:value arg:dtype arg:name arguments arg arg arg If Compare Assign Call If Call Assign Call If Call If BoolOp Call Raise Call Return return:yes Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "print_invocation_results", + "source_code": "def print_invocation_results(result_store_dict: ResultDictType):\n print()\n if not result_store_dict:\n print('Found no ResultStore links for Bazel build/test invocations.')\n else:\n print(f'Found {len(result_store_dict)} ResultStore link(s) for Bazel invocations.\\nResultStore contains individual representations of each target that were run/built during the invocation.\\nThese results are generally easier to read than looking through the entire build log:\\n')\n i = 1\n for url, invocation_results in result_store_dict.items():\n line_str = f'Invocation #{i} ({invocation_results['status']}):\\n'\n command = invocation_results.get('command')\n if command:\n line_str += command\n else:\n line_str += \"Couldn't parse the bazel command, check inside the build log instead\"\n line_str += f'\\n{url}\\n'\n print(line_str)\n i += 1", + "docstring": "Prints out a short summary of the found ResultStore links (if any).", + "type": "function", + "file_path": "tensorflow\\ci\\official\\utilities\\extract_resultstore_links.py", + "ast_data": "FunctionDef name:print_invocation_results arg:result_store_dict arguments arg Call If Call Call Call Assign For Call Assign Assign Call If Call" + }, + { + "library": "pytorch", + "name": "TorchAODType", + "source_code": "class TorchAODType(Enum):\n INT1 = auto()\n INT2 = auto()\n INT3 = auto()\n INT4 = auto()\n INT5 = auto()\n INT6 = auto()\n INT7 = auto()", + "docstring": "Placeholder for dtypes that do not exist in PyTorch core yet.", + "type": "class", + "file_path": "pytorch\\torch\\ao\\quantization\\observer.py", + "ast_data": "ClassDef name:TorchAODType Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call" + }, + { + "library": "matplotlib", + "name": "get_stretch", + "source_code": "def get_stretch(self):\n return self._fontproperties.get_stretch()", + "docstring": "Return the font stretch as a string or a number. 
See Also -------- .font_manager.FontProperties.get_stretch", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:get_stretch arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "insert", + "source_code": "def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None:\n new_axis = self.items.insert(loc, item)\n if value.ndim == 2:\n value = value.T\n if len(value) > 1:\n raise ValueError(f'Expected a 1D array, got an array with shape {value.T.shape}')\n else:\n value = ensure_block_shape(value, ndim=self.ndim)\n bp = BlockPlacement(slice(loc, loc + 1))\n block = new_block_2d(values=value, placement=bp, refs=refs)\n if not len(self.blocks):\n self._blklocs = np.array([0], dtype=np.intp)\n self._blknos = np.array([0], dtype=np.intp)\n else:\n self._insert_update_mgr_locs(loc)\n self._insert_update_blklocs_and_blknos(loc)\n self.axes[0] = new_axis\n self.blocks += (block,)\n self._known_consolidated = False\n if get_option('performance_warnings') and sum((not block.is_extension for block in self.blocks)) > 100:\n warnings.warn('DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`', PerformanceWarning, stacklevel=find_stack_level())", + "docstring": "Insert item at selected position. Parameters ---------- loc : int item : hashable value : np.ndarray or ExtensionArray refs : The reference tracking object of the value to set.", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:insert arg:self arg:loc arg:item arg:value arg:refs arguments arg arg arg arg arg Assign Call If Compare Assign If Compare Call Raise Call Assign Call Assign Call Call Assign Call If Call Assign Call Assign Call Call Call Assign Assign If BoolOp Call Compare Call Call Call" + }, + { + "library": "sphinx", + "name": "optional_int", + "source_code": "def optional_int(argument: str) -> int | None:\n if argument is None:\n return None\n else:\n value = int(argument)\n if value < 0:\n msg = 'negative value; must be positive or zero'\n raise ValueError(msg)\n return value", + "docstring": "Check for an integer argument or None value; raise `` if not.", + "type": "function", + "file_path": "sphinx\\sphinx\\directives\\__init__.py", + "ast_data": "FunctionDef name:optional_int arg:argument arguments arg If Compare Return return:no Assign Call If Compare Assign Raise Call Return return:yes" + }, + { + "library": "authlib", + "name": "create_token_response", + "source_code": "@hooked\ndef create_token_response(self):\n client = self.request.client\n scope = self.request.credential.get_scope()\n token = self.generate_token(user=self.request.user, scope=scope, include_refresh_token=client.check_grant_type('refresh_token'))\n log.debug('Issue token %r to %r', token, client)\n self.save_token(token)\n return (200, token, self.TOKEN_RESPONSE_HEADER)", + "docstring": "If the access token request is valid and authorized, the authorization server issues an access token and optional refresh token.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc8628\\device_code.py", + "ast_data": "FunctionDef name:create_token_response arg:self arguments arg Assign Assign Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": 
"trainable_variables", + "source_code": "@property\ndef trainable_variables(self):\n if not self._variables_created:\n return []\n return self._template_store.trainable_variables()", + "docstring": "Returns the list of trainable variables created by the Template.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py", + "ast_data": "FunctionDef name:trainable_variables arg:self arguments arg If Return return:no Return return:yes Call" + }, + { + "library": "pytorch", + "name": "step", + "source_code": "@torch.no_grad()\ndef step(self, closure=None):\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n for group in self.param_groups:\n params_with_grad: list[Tensor] = []\n grads: list[Tensor] = []\n exp_avgs: list[Tensor] = []\n exp_avg_sqs: list[Tensor] = []\n state_steps: list[int] = []\n beta1, beta2 = group['betas']\n maximize = group.get('maximize', False)\n for p in group['params']:\n if p.grad is not None:\n params_with_grad.append(p)\n if not p.grad.is_sparse:\n raise RuntimeError('SparseAdam does not support dense gradients, please consider Adam instead')\n grads.append(p.grad)\n state = self.state[p]\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)\n state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)\n exp_avgs.append(state['exp_avg'])\n exp_avg_sqs.append(state['exp_avg_sq'])\n state['step'] += 1\n state_steps.append(state['step'])\n F.sparse_adam(params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps, eps=group['eps'], beta1=beta1, beta2=beta2, lr=_to_scalar(group['lr']), maximize=maximize)\n return loss", + "docstring": "Perform a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\sparse_adam.py", + "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Assign If Compare With Call Assign Call For Assign Assign Call For If Compare Call If Raise Call Call Assign If Compare Call Assign Assign Call Assign Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_alpha", + "source_code": "def set_alpha(self, alpha):\n if alpha is not None:\n self._alpha = alpha\n self._forced_alpha = True\n else:\n self._alpha = 1.0\n self._forced_alpha = False\n self.set_foreground(self._rgb, isRGBA=True)", + "docstring": "Set the alpha value used for blending - not supported on all backends. If `` will override them.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:set_alpha arg:self arg:alpha arguments arg arg If Compare Assign Assign Assign Assign Call" + }, + { + "library": "scipy", + "name": "_minor_reduce", + "source_code": "def _minor_reduce(self, ufunc, data=None):\n if data is None:\n data = self.data\n major_index = np.flatnonzero(np.diff(self.indptr))\n value = ufunc.reduceat(data, downcast_intp_index(self.indptr[major_index]))\n return (major_index, value)", + "docstring": "Reduce nonzeros with a ufunc over the minor axis when non-empty Can be applied to a function of self.data by supplying data parameter. 
Warning: this does not call sum_duplicates() Returns ------- major_index : array of ints Major indices where nonzero value : array of self.dtype Reduce result for nonzeros in each major_index", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_compressed.py", + "ast_data": "FunctionDef name:_minor_reduce arg:self arg:ufunc arg:data arguments arg arg arg If Compare Assign Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_file", + "source_code": "def get_file(self):\n return self._file", + "docstring": "Return the filename of the associated font.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py", + "ast_data": "FunctionDef name:get_file arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "sym_max", + "source_code": "def sym_max(a, b):\n if overrides.has_torch_function((a, b)):\n return overrides.handle_torch_function(sym_max, (a, b), a, b)\n if isinstance(a, (SymInt, SymFloat)):\n return a.__sym_max__(b)\n elif isinstance(b, (SymInt, SymFloat)):\n return b.__sym_max__(a)\n all_types, float_types = __all_and_float_types()\n assert isinstance(a, all_types), type(a)\n assert isinstance(b, all_types), type(b)\n if isinstance(a, float_types) or isinstance(b, float_types):\n return builtins.float(builtins.max(a, b))\n else:\n return builtins.max(a, b)", + "docstring": "SymInt-aware utility for max which avoids branching on a < b. Unlike builtins.max(), this only works for int/float, and it always promotes to float if any argument is float (unlike builtins.max, which will faithfully preserve the type of the input argument).", + "type": "function", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:sym_max arg:a arg:b arguments arg arg If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Assign Call Call Call Call Call If BoolOp Call Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__call__", + "source_code": "def __call__(self, x, pos=None):\n if len(self.locs) == 0:\n return ''\n else:\n xp = (x - self.offset) / 10.0 ** self.orderOfMagnitude\n if abs(xp) < 1e-08:\n xp = 0\n return self._format_maybe_minus_and_locale(self.format, xp)", + "docstring": "Return the format for tick value *x* at position *pos*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg If Compare Call Return return:yes Assign If Compare Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "initializer", + "source_code": "@property\ndef initializer(self):\n return self._initializer_op", + "docstring": "The op responsible for initializing this variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:initializer arg:self arguments arg Return return:yes" + }, + { + "library": "numpy", + "name": "__call__", + "source_code": "def __call__(self, func, *args, **kwargs):\n old_name = self.old_name\n new_name = self.new_name\n message = self.message\n if old_name is None:\n old_name = func.__name__\n if new_name is None:\n depdoc = f'`{old_name}` is deprecated!'\n else:\n depdoc = f'`{old_name}` is deprecated, use `{new_name}` instead!'\n if message is not None:\n depdoc += '\\n' + message\n\n @functools.wraps(func)\n def newfunc(*args, **kwds):\n 
warnings.warn(depdoc, DeprecationWarning, stacklevel=2)\n return func(*args, **kwds)\n newfunc.__name__ = old_name\n doc = func.__doc__\n if doc is None:\n doc = depdoc\n else:\n lines = doc.expandtabs().split('\\n')\n indent = _get_indent(lines[1:])\n if lines[0].lstrip():\n doc = indent * ' ' + doc\n else:\n skip = len(lines[0]) + 1\n for line in lines[1:]:\n if len(line) > indent:\n break\n skip += len(line) + 1\n doc = doc[skip:]\n depdoc = textwrap.indent(depdoc, ' ' * indent)\n doc = f'{depdoc}\\n\\n{doc}'\n newfunc.__doc__ = doc\n return newfunc", + "docstring": "Decorator call. Refer to ``.", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_utils_impl.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:func arguments arg arg arg arg Assign Assign Assign If Compare Assign If Compare Assign Assign If Compare FunctionDef name:newfunc arguments arg arg Call Return return:yes Call Call Assign Assign If Compare Assign Assign Call Call Assign Call If Call Assign Assign Call For If Compare Call Call Assign Assign Call Assign Assign Return return:yes" + }, + { + "library": "kornia", + "name": "save", + "source_code": "def save(self, images: Union[Tensor, list[Tensor]], edge_maps: Optional[Union[Tensor, list[Tensor]]]=None, directory: Optional[str]=None, output_type: str='torch') -> None:\n outputs = self.visualize(images, edge_maps, output_type)\n self._save_outputs(images, directory, suffix='_src')\n self._save_outputs(outputs, directory, suffix='_edge')", + "docstring": "Save the edge detection results. Args: images: input tensor. edge_maps: detected edges. output_type: type of the output. directory: where to save outputs. Returns: output tensor.", + "type": "method", + "file_path": "kornia\\kornia\\models\\edge_detection\\base.py", + "ast_data": "FunctionDef name:save arg:self arg:images arg:edge_maps arg:directory arg:output_type arguments arg arg arg arg arg Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "get_slice_numels", + "source_code": "@staticmethod\ndef get_slice_numels(dims: list[Expr]) -> list[Expr]:\n numels = collections.deque([sympy.S.One])\n for dim in dims[:0:-1]:\n numel = dim * numels[0]\n numels.appendleft(numel)\n return [*numels]", + "docstring": "Compute the cumulative size of each dimension's slice. 
This proceeds from the last dim up to the second.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\block_analysis.py", + "ast_data": "FunctionDef name:get_slice_numels arg:dims arguments arg Assign Call For Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "concat_horizontal", + "source_code": "@classmethod\ndef concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self:\n offset = 0\n blocks: list[Block] = []\n for mgr in mgrs:\n for blk in mgr.blocks:\n nb = blk.slice_block_columns(slice(None))\n nb._mgr_locs = nb._mgr_locs.add(offset)\n blocks.append(nb)\n offset += len(mgr.items)\n new_mgr = cls(tuple(blocks), axes)\n return new_mgr", + "docstring": "Concatenate uniformly-indexed BlockManagers horizontally.", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:concat_horizontal arg:cls arg:mgrs arg:axes arguments arg arg arg Assign For For Assign Call Call Assign Call Call Call Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "format_index_names", + "source_code": "def format_index_names(self, formatter: ExtFormatter | None=None, axis: Axis=0, level: Level | list[Level] | None=None, na_rep: str | None=None, precision: int | None=None, decimal: str='.', thousands: str | None=None, escape: str | None=None, hyperlinks: str | None=None) -> StylerRenderer:\n axis = self.data._get_axis_number(axis)\n if axis == 0:\n display_funcs_, obj = (self._display_funcs_index_names, self.index)\n else:\n display_funcs_, obj = (self._display_funcs_column_names, self.columns)\n levels_ = refactor_levels(level, obj)\n if all((formatter is None, level is None, precision is None, decimal == '.', thousands is None, na_rep is None, escape is None, hyperlinks is None)):\n display_funcs_.clear()\n return self\n if not isinstance(formatter, dict):\n formatter = dict.fromkeys(levels_, formatter)\n else:\n formatter = {obj._get_level_number(level): formatter_ for level, formatter_ in formatter.items()}\n for lvl in levels_:\n format_func = _maybe_wrap_formatter(formatter.get(lvl), na_rep=na_rep, precision=precision, decimal=decimal, thousands=thousands, escape=escape, hyperlinks=hyperlinks)\n display_funcs_[lvl] = format_func\n return self", + "docstring": "Format the text display value of index names or column names. .. versionadded:: 3.0 Parameters ---------- formatter : str, callable, dict or None Object to define how values are displayed. See notes. axis : {0, \"index\", 1, \"columns\"} Whether to apply the formatter to the index or column headers. level : int, str, list The level(s) over which to apply the generic formatter. na_rep : str, optional Representation for missing values. If ```formatterStyler.format_indexnamesStyler.format_index_namesStyler.to_excel`, since Excel and Python have inherently different formatting structures. Examples -------- >>> df = pd.DataFrame( ... [[1, 2], [3, 4]], ... index=pd.Index([\"a\", \"b\"], name=\"idx\"), ... 
) >>> df # doctest: +SKIP 0 1 idx a 1 2 b 3 4 >>> df.style.format_index_names(lambda x: x.upper(), axis=0) # doctest: +SKIP 0 1 IDX a 1 2 b 3 4", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\style_render.py", + "ast_data": "FunctionDef name:format_index_names arg:self arg:formatter arg:axis arg:level arg:na_rep arg:precision arg:decimal arg:thousands arg:escape arg:hyperlinks arguments arg arg arg arg arg arg arg arg arg arg Assign Call If Compare Assign Assign Assign Call If Call Compare Compare Compare Compare Compare Compare Compare Compare Call Return return:yes If Call Assign Call Assign Call Call For Assign Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_apply_sparse_duplicate_indices", + "source_code": "def _apply_sparse_duplicate_indices(self, grad, var):\n summed_values, unique_indices = _deduplicate_indexed_slices(values=grad.values, indices=grad.indices)\n gradient_no_duplicate_indices = indexed_slices.IndexedSlices(indices=unique_indices, values=summed_values, dense_shape=grad.dense_shape)\n return self._apply_sparse(gradient_no_duplicate_indices, var)", + "docstring": "Add ops to apply sparse gradients to , with repeated sparse indices. Optimizers which override this method must deal with IndexedSlices objects such as the following: IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1]) The correct interpretation is: IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1]) Many optimizers deal incorrectly with repeated indices when updating based on sparse gradients (e.g. summing squares rather than squaring the sum, or applying momentum terms multiple times). Adding first is always the correct behavior, so this is enforced here by reconstructing the IndexedSlices to have only unique indices, then calling _apply_sparse. Optimizers which deal correctly with repeated indices may instead override this method to avoid the overhead of summing indices. Args: grad: . var: A object. 
Returns: An .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_apply_sparse_duplicate_indices arg:self arg:grad arg:var arguments arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "compute_ctc_loss", + "source_code": "@custom_gradient.custom_gradient\ndef compute_ctc_loss(logits_t, labels_t, label_length_t, logit_length_t, *unique_t):\n logits_t.set_shape(logits.shape)\n labels_t.set_shape(labels.shape)\n label_length_t.set_shape(label_length.shape)\n logit_length_t.set_shape(logit_length.shape)\n kwargs = dict(logits=logits_t, labels=labels_t, label_length=label_length_t, logit_length=logit_length_t)\n if unique_t:\n kwargs['unique'] = unique_t\n result = ctc_loss_and_grad(**kwargs)\n\n def grad(grad_loss):\n grad = [array_ops.reshape(grad_loss, [1, -1, 1]) * result[1]]\n grad += [None] * (len(args) - len(grad))\n return grad\n return (result[0], grad)", + "docstring": "Compute CTC loss.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ctc_ops.py", + "ast_data": "FunctionDef name:compute_ctc_loss arg:logits_t arg:labels_t arg:label_length_t arg:logit_length_t arguments arg arg arg arg arg Call Call Call Call Assign Call If Assign Assign Call FunctionDef name:grad arg:grad_loss arguments arg Assign Call Call Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "draw_artist", + "source_code": "def draw_artist(self, a):\n a.draw(self.get_figure(root=True).canvas.get_renderer())", + "docstring": "Efficiently redraw a single artist.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:draw_artist arg:self arg:a arguments arg arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "fftshift", + "source_code": "@tf_export('signal.fftshift')\n@dispatch.add_dispatch_support\ndef fftshift(x, axes=None, name=None):\n with _ops.name_scope(name, 'fftshift') as name:\n x = _ops.convert_to_tensor(x)\n if axes is None:\n axes = tuple(range(x.shape.ndims))\n shift = _array_ops.shape(x) // 2\n elif isinstance(axes, int):\n shift = _array_ops.shape(x)[axes] // 2\n else:\n rank = _array_ops.rank(x)\n axes = _array_ops.where(_math_ops.less(axes, 0), axes + rank, axes)\n shift = _array_ops.gather(_array_ops.shape(x), axes) // 2\n return manip_ops.roll(x, shift, axes, name)", + "docstring": "Shift the zero-frequency component to the center of the spectrum. This function swaps half-spaces for all axes listed (defaults to all). 
Note that `TensorinttupleTensor`, The shifted tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py", + "ast_data": "FunctionDef name:fftshift arg:x arg:axes arg:name arguments arg arg arg With Call Assign Call If Compare Assign Call Call Assign Call If Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "get_global_rank", + "source_code": "def get_global_rank(group: ProcessGroup, group_rank: int) -> int:\n if group is GroupMember.WORLD:\n return group_rank\n if group not in _world.pg_group_ranks:\n raise ValueError(f'Group {group} is not registered, please create group with torch.distributed.new_group API')\n for rank, grp_rank in _world.pg_group_ranks[group].items():\n if grp_rank == group_rank:\n return rank\n raise ValueError(f'Group rank {group_rank} is not part of group {group}')", + "docstring": "Translate a group rank into a global rank. `group` N.B. calling this function on the default process group returns identity", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:get_global_rank arg:group arg:group_rank arguments arg arg If Compare Return return:yes If Compare Raise Call For Call If Compare Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "_AddNextAndBackEdge", + "source_code": "def _AddNextAndBackEdge(m, v, enforce_shape_invariant=True):\n if isinstance(m, tensor_lib.Tensor):\n v = ops.convert_to_tensor(v)\n v = _NextIteration(v)\n if enforce_shape_invariant:\n _EnforceShapeInvariant(m, v)\n m.op._update_input(1, v)\n elif isinstance(m, composite_tensor.CompositeTensor):\n\n def update_component(m_component, v_component):\n m_component.op._update_input(1, v_component)\n if isinstance(m, indexed_slices.IndexedSlices):\n v = math_ops._as_indexed_slices(v, optimize=False)\n v = _NextIteration(v)\n return nest.map_structure(update_component, m, v, expand_composites=True)\n else:\n raise TypeError(f\"'m' must be a Tensor or CompositeTensor. 
Received: {type(m)}.\")\n return v", + "docstring": "Add NextIteration and back edge from v to m.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:_AddNextAndBackEdge arg:m arg:v arg:enforce_shape_invariant arguments arg arg arg If Call Assign Call Assign Call If Call Call If Call FunctionDef name:update_component arg:m_component arg:v_component arguments arg arg Call If Call Assign Call Assign Call Return return:yes Call Raise Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_clear_state", + "source_code": "def _clear_state(self):\n for var in ('train_score_', 'validation_score_'):\n if hasattr(self, var):\n delattr(self, var)", + "docstring": "Clear the state of the gradient boosting model.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", + "ast_data": "FunctionDef name:_clear_state arg:self arguments arg For If Call Call" + }, + { + "library": "scrapy", + "name": "_get_request_cookies", + "source_code": "def _get_request_cookies(self, jar: CookieJar, request: Request) -> Sequence[Cookie]:\n if not request.cookies:\n return []\n cookies: Iterable[VerboseCookie]\n if isinstance(request.cookies, dict):\n cookies = tuple(({'name': k, 'value': v} for k, v in request.cookies.items()))\n else:\n cookies = request.cookies\n for cookie in cookies:\n cookie.setdefault('secure', urlparse_cached(request).scheme == 'https')\n formatted = filter(None, (self._format_cookie(c, request) for c in cookies))\n response = Response(request.url, headers={'Set-Cookie': formatted})\n return jar.make_cookies(response, request)", + "docstring": "Extract cookies from the Request.cookies attribute", + "type": "method", + "file_path": "scrapy\\scrapy\\downloadermiddlewares\\cookies.py", + "ast_data": "FunctionDef name:_get_request_cookies arg:self arg:jar arg:request arguments arg arg arg If Return return:no If Call Assign Call Call Assign For Call Compare Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "BaseDayArchiveView", + "source_code": "class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView):\n\n def get_dated_items(self):\n year = self.get_year()\n month = self.get_month()\n day = self.get_day()\n date = _date_from_string(year, self.get_year_format(), month, self.get_month_format(), day, self.get_day_format())\n return self._get_dated_items(date)\n\n def _get_dated_items(self, date):\n lookup_kwargs = self._make_single_date_lookup(date)\n qs = self.get_dated_queryset(**lookup_kwargs)\n return (None, qs, {'day': date, 'previous_day': self.get_previous_day(date), 'next_day': self.get_next_day(date), 'previous_month': self.get_previous_month(date), 'next_month': self.get_next_month(date)})", + "docstring": "Base view for a list of objects published on a given day. 
This requires subclassing to provide a response mixin.", + "type": "class", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "ClassDef name:BaseDayArchiveView FunctionDef name:get_dated_items arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Call Call Call Return return:yes Call FunctionDef name:_get_dated_items arg:self arg:date arguments arg arg Assign Call Assign Call Return return:yes Call Call Call Call" + }, + { + "library": "pytorch", + "name": "info_dict", + "source_code": "def info_dict(self) -> dict[str, Union[PrimitiveInfoType, list[PrimitiveInfoType]]]:\n return {}", + "docstring": "Information returned here is logged to the autotune log file when that is enabled.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "FunctionDef name:info_dict arg:self arguments arg Return return:no" + }, + { + "library": "scipy", + "name": "B", + "source_code": "@property\ndef B(self):\n return self._B", + "docstring": "Input matrix of the system.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:B arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "max_unpool2d", + "source_code": "def max_unpool2d(input: Tensor, indices: Tensor, kernel_size: BroadcastingList2[int], stride: Optional[BroadcastingList2[int]]=None, padding: BroadcastingList2[int]=0, output_size: Optional[BroadcastingList2[int]]=None) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(max_unpool2d, (input,), input, indices, kernel_size, stride=stride, padding=padding, output_size=output_size)\n kernel_size = _pair(kernel_size)\n if stride is not None:\n _stride = _pair(stride)\n else:\n _stride = kernel_size\n padding = _pair(padding)\n output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)\n return torch._C._nn.max_unpool2d(input, indices, output_size)", + "docstring": "Compute a partial inverse of :class:. See :class: for details.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:max_unpool2d arg:input arg:indices arg:kernel_size arg:stride arg:padding arg:output_size arguments arg arg arg arg arg arg If Call Return return:yes Call Assign Call If Compare Assign Call Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n X = self._check_input(X, reset=True)\n if self.check_inverse and (not (self.func is None or self.inverse_func is None)):\n self._check_inverse_transform(X)\n return self", + "docstring": "Fit transformer by checking X. If `validate=Truefunc` can handle Input array. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object FunctionTransformer class instance.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_function_transformer.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call If BoolOp BoolOp Compare Compare Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "difference", + "source_code": "def difference(self, other) -> FrozenList:\n other = set(other)\n temp = [x for x in self if x not in other]\n return type(self)(temp)", + "docstring": "Returns a FrozenList with elements from other removed from self. 
Parameters ---------- other : array-like The array-like whose elements we are removing self. Returns ------- FrozenList The collection difference between self and other.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\frozen.py", + "ast_data": "FunctionDef name:difference arg:self arg:other arguments arg arg Assign Call Assign Compare Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "_float_to_float", + "source_code": "def _float_to_float(self, value):\n return _fr1(self._float_conv(value))", + "docstring": "Converts float to float. Parameters ---------- value : float value to be converted.", + "type": "method", + "file_path": "numpy\\numpy\\_core\\getlimits.py", + "ast_data": "FunctionDef name:_float_to_float arg:self arg:value arguments arg arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "__init__", + "source_code": "def __init__(self, rotation: So2, translation: Vector2 | Tensor) -> None:\n super().__init__()\n KORNIA_CHECK_TYPE(rotation, So2)\n if not isinstance(translation, (Vector2, Tensor)):\n raise TypeError(f'translation type is {type(translation)}')\n self._translation: Vector2 | Parameter\n self._rotation: So2 = rotation\n if isinstance(translation, Tensor):\n _check_se2_r_t_shape(rotation, translation)\n self._translation = Parameter(translation)\n else:\n self._translation = translation", + "docstring": "Construct the base class. Internally represented by a complex number and a translation 2-vector. Args: rotation: So2 group encompassing a rotation. translation: translation vector with the shape of :math:. Example: >>> so2 = So2.identity(1) >>> t = torch.ones((1, 2)) >>> se2 = Se2(so2, t) >>> se2 rotation: Parameter containing: tensor([1.+0.j], requires_grad=True) translation: Parameter containing: tensor([[1., 1.]], requires_grad=True)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:rotation arg:translation arguments arg arg arg Call Call Call If Call Raise Call Call If Call Call Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "_is_disk_usage_reset_each_run", + "source_code": "def _is_disk_usage_reset_each_run(self):\n return False", + "docstring": "Indicates whether disk usage is reset after each Session.run. Subclasses that clean up the disk usage after every run should override this protected method. 
Returns: () Whether the disk usage amount is reset to zero after each Session.run.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", + "ast_data": "FunctionDef name:_is_disk_usage_reset_each_run arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "get_loggers", + "source_code": "def get_loggers():\n return [logging.getLogger(qname) for qname in log_registry.get_log_qnames()]", + "docstring": "Returns: a list of all registered loggers", + "type": "function", + "file_path": "pytorch\\torch\\_logging\\_internal.py", + "ast_data": "FunctionDef name:get_loggers arguments Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "get_offset", + "source_code": "def get_offset(self):\n return self._offset", + "docstring": "Return offset of the container.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "FunctionDef name:get_offset arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_square_flops", + "source_code": "@ops.RegisterStatistics('Square', 'flops')\ndef _square_flops(graph, node):\n return _unary_op_flops(graph, node)", + "docstring": "Compute flops for Square operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py", + "ast_data": "FunctionDef name:_square_flops arg:graph arg:node arguments arg arg Return return:yes Call Call" + }, + { + "library": "cherrypy", + "name": "_old_process_multipart", + "source_code": "def _old_process_multipart(entity):\n process_multipart(entity)\n params = entity.params\n for part in entity.parts:\n if part.name is None:\n key = ntou('parts')\n else:\n key = part.name\n if part.filename is None:\n value = part.fullvalue()\n else:\n value = part\n if key in params:\n if not isinstance(params[key], list):\n params[key] = [params[key]]\n params[key].append(value)\n else:\n params[key] = value", + "docstring": "Behavior of 3.2 and lower. Deprecated and will be changed in 3.3.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\_cpreqbody.py", + "ast_data": "FunctionDef name:_old_process_multipart arg:entity arguments arg Call Assign For If Compare Assign Call Assign If Compare Assign Call Assign If Compare If Call Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "getitem", + "source_code": "def getitem(a, slice_spec):\n return _maybe_static(a)[slice_spec]", + "docstring": "A version of __getitem__ that eagerly evaluates if possible.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py", + "ast_data": "FunctionDef name:getitem arg:a arg:slice_spec arguments arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "staged_predict", + "source_code": "def staged_predict(self, X):\n for raw_predictions in self._staged_raw_predict(X):\n yield raw_predictions.ravel()", + "docstring": "Predict regression target at each stage for X. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``. 
Yields ------ y : generator of ndarray of shape (n_samples,) The predicted value of the input samples.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", + "ast_data": "FunctionDef name:staged_predict arg:self arg:X arguments arg arg For Call Call" + }, + { + "library": "pandas", + "name": "_unwrap_setitem_indexer", + "source_code": "def _unwrap_setitem_indexer(self, indexer):\n return indexer", + "docstring": "For compatibility with 1D-only ExtensionArrays.", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "FunctionDef name:_unwrap_setitem_indexer arg:self arg:indexer arguments arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_SysImporter", + "source_code": "class _SysImporter(Importer):\n\n def import_module(self, module_name: str):\n return importlib.import_module(module_name)\n\n def whichmodule(self, obj: Any, name: str) -> str:\n return _pickle_whichmodule(obj, name)", + "docstring": "An importer that implements the default behavior of Python.", + "type": "class", + "file_path": "pytorch\\torch\\package\\importer.py", + "ast_data": "ClassDef name:_SysImporter FunctionDef name:import_module arg:self arg:module_name arguments arg arg Return return:yes Call FunctionDef name:whichmodule arg:self arg:obj arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "_update_dim_sizes", + "source_code": "def _update_dim_sizes(dim_sizes, arg, core_dims):\n if not core_dims:\n return\n num_core_dims = len(core_dims)\n if arg.ndim < num_core_dims:\n raise ValueError('%d-dimensional argument does not have enough dimensions for all core dimensions %r' % (arg.ndim, core_dims))\n core_shape = arg.shape[-num_core_dims:]\n for dim, size in zip(core_dims, core_shape):\n if dim in dim_sizes:\n if size != dim_sizes[dim]:\n raise ValueError('inconsistent size for core dimension %r: %r vs %r' % (dim, size, dim_sizes[dim]))\n else:\n dim_sizes[dim] = size", + "docstring": "Incrementally check and update core dimension sizes for a single argument. Arguments --------- dim_sizes : Dict[str, int] Sizes of existing core dimensions. Will be updated in-place. arg : ndarray Argument to examine. core_dims : Tuple[str, ...] 
Core dimensions for this argument.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_function_base_impl.py", + "ast_data": "FunctionDef name:_update_dim_sizes arg:dim_sizes arg:arg arg:core_dims arguments arg arg arg If Return return:no Assign Call If Compare Raise Call Assign For Call If Compare If Compare Raise Call Assign" + }, + { + "library": "seaborn", + "name": "data_variable", + "source_code": "@property\ndef data_variable(self):\n if not self.univariate:\n raise AttributeError('This is not a univariate plot')\n return {'x', 'y'}.intersection(self.variables).pop()", + "docstring": "Return the variable with data for univariate plots.", + "type": "method", + "file_path": "seaborn\\seaborn\\distributions.py", + "ast_data": "FunctionDef name:data_variable arg:self arguments arg If Raise Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "relative_camera_motion", + "source_code": "def relative_camera_motion(R1: torch.Tensor, t1: torch.Tensor, R2: torch.Tensor, t2: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n KORNIA_CHECK_SHAPE(R1, ['*', '3', '3'])\n KORNIA_CHECK_SHAPE(R2, ['*', '3', '3'])\n KORNIA_CHECK_SHAPE(t1, ['*', '3', '1'])\n KORNIA_CHECK_SHAPE(t2, ['*', '3', '1'])\n R = R2 @ R1.transpose(-2, -1)\n t = t2 - R @ t1\n return (R, t)", + "docstring": "Compute the relative camera motion between two cameras. Given the motion parameters of two cameras, computes the motion parameters of the second one assuming the first one to be at the origin. If :math: and :math: are the camera motions, the computed relative motion is :math:. Args: R1: The first camera rotation matrix with shape :math:. t1: The first camera translation vector with shape :math:. R2: The second camera rotation matrix with shape :math:. t2: The second camera translation vector with shape :math:. Returns: A tuple with the relative rotation matrix and translation vector with the shape of :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\epipolar\\essential.py", + "ast_data": "FunctionDef name:relative_camera_motion arg:R1 arg:t1 arg:R2 arg:t2 arguments arg arg arg arg Call Call Call Call Assign Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_gen_rows_with_counts", + "source_code": "def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:\n yield from zip(self._gen_non_null_counts(), self._gen_dtypes())", + "docstring": "Iterator with string representation of body data with counts.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:_gen_rows_with_counts arg:self arguments arg Call Call Call" + }, + { + "library": "pandas", + "name": "_to_dict_of_blocks", + "source_code": "def _to_dict_of_blocks(self):\n mgr = self._mgr\n return {k: self._constructor_from_mgr(v, axes=v.axes).__finalize__(self) for k, v in mgr.to_iter_dict()}", + "docstring": "Return a dict of dtype -> Constructor Types that each is a homogeneous dtype. 
Internal ONLY.", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:_to_dict_of_blocks arg:self arguments arg Assign Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "define_aliases", + "source_code": "def define_aliases(alias_d, cls=None):\n if cls is None:\n return functools.partial(define_aliases, alias_d)\n\n def make_alias(name):\n\n @functools.wraps(getattr(cls, name))\n def method(self, *args, **kwargs):\n return getattr(self, name)(*args, **kwargs)\n return method\n for prop, aliases in alias_d.items():\n exists = False\n for prefix in ['get_', 'set_']:\n if prefix + prop in vars(cls):\n exists = True\n for alias in aliases:\n method = make_alias(prefix + prop)\n method.__name__ = prefix + alias\n method.__doc__ = f'Alias for `{prefix + prop}`.'\n setattr(cls, prefix + alias, method)\n if not exists:\n raise ValueError(f'Neither getter nor setter exists for {prop!r}')\n\n def get_aliased_and_aliases(d):\n return {*d, *(alias for aliases in d.values() for alias in aliases)}\n preexisting_aliases = getattr(cls, '_alias_map', {})\n conflicting = get_aliased_and_aliases(preexisting_aliases) & get_aliased_and_aliases(alias_d)\n if conflicting:\n raise NotImplementedError(f'Parent class already defines conflicting aliases: {conflicting}')\n cls._alias_map = {**preexisting_aliases, **alias_d}\n return cls", + "docstring": "Class decorator for defining property aliases. Use as :: @_api.define_aliases({\"property\": [\"alias\", ...], ...}) class C: ... For each property, if the corresponding `.normalize_kwargs` (which assumes that higher priority aliases come last).", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\_api\\__init__.py", + "ast_data": "FunctionDef name:define_aliases arg:alias_d arg:cls arguments arg arg If Compare Return return:yes Call FunctionDef name:make_alias arg:name arguments arg FunctionDef name:method arg:self arguments arg arg arg Return return:yes Call Call Call Call Return return:yes For Call Assign For If Compare Call Assign For Assign Call Assign Assign Call If Raise Call FunctionDef name:get_aliased_and_aliases arg:d arguments arg Return return:yes Call Assign Call Assign Call Call If Raise Call Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X):\n check_is_fitted(self)\n return self._transform(X, self.components_)", + "docstring": "Encode the data as a sparse combination of the dictionary atoms. Coding method is determined by the object parameter . Parameters ---------- X : ndarray of shape (n_samples, n_features) Test data to be transformed, must have the same number of features as the data used to train the model. 
Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed data.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "SymbolicTensor", + "source_code": "@tf_export('__internal__.SymbolicTensor')\nclass SymbolicTensor(pywrap_tf_session.PyTensor, tensor_lib.Tensor):\n\n def __new__(cls, op, value_index, dtype, unique_id=None) -> 'SymbolicTensor':\n if unique_id is None:\n unique_id = uid()\n return pywrap_tf_session.PyTensor.__new__(SymbolicTensor, op, value_index, dtypes.as_dtype(dtype), unique_id)\n\n def __copy__(self) -> 'SymbolicTensor':\n cls = self.__class__\n result = cls.__new__(cls, self.op, self.value_index, self.dtype, self._id)\n result.__dict__.update(self.__dict__)\n return result", + "docstring": "A symbolic tensor from a graph or tf.function.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "ClassDef name:SymbolicTensor FunctionDef name:__new__ arg:cls arg:op arg:value_index arg:dtype arg:unique_id arguments arg arg arg arg arg If Compare Assign Call Return return:yes Call Call FunctionDef name:__copy__ arg:self arguments arg Assign Assign Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "SuperpointDescriptor", + "source_code": "class SuperpointDescriptor(Module):\n\n def __init__(self, input_feat_dim: int=128) -> None:\n super().__init__()\n self.relu = nn.ReLU(inplace=True)\n self.convPa = nn.Conv2d(input_feat_dim, 256, kernel_size=3, stride=1, padding=1)\n self.convPb = nn.Conv2d(256, 128, kernel_size=1, stride=1, padding=0)\n\n def forward(self, input_features: Tensor) -> Tensor:\n feat = self.relu(self.convPa(input_features))\n semi = self.convPb(feat)\n return semi", + "docstring": "Descriptor decoder based on the SuperPoint arcihtecture. Args: input_feat_dim: channel size of the input features. Returns: the semi-dense descriptors with shape (B, 128, H/4, W/4).", + "type": "class", + "file_path": "kornia\\kornia\\feature\\sold2\\backbones.py", + "ast_data": "ClassDef name:SuperpointDescriptor FunctionDef name:__init__ arg:self arg:input_feat_dim arguments arg arg Call Call Assign Call Assign Call Assign Call FunctionDef name:forward arg:self arg:input_features arguments arg arg Assign Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "log_cdf_laplace", + "source_code": "def log_cdf_laplace(x, name='log_cdf_laplace'):\n with ops.name_scope(name, values=[x]):\n x = ops.convert_to_tensor(x, name='x')\n lower_solution = -np.log(2.0) + x\n safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))\n upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)\n return array_ops.where_v2(x < 0.0, lower_solution, upper_solution)", + "docstring": "Log Laplace distribution function. This function calculates , where is the cumulative distribution function of the Laplace distribution, i.e. For numerical accuracy, is computed in different ways depending on , Args: x: of type , . name: Python string. A name for the operation (default=\"log_ndtr\"). Returns: with . 
Raises: TypeError: if is not handled.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\special_math.py", + "ast_data": "FunctionDef name:log_cdf_laplace arg:x arg:name arguments arg arg With Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call Compare" + }, + { + "library": "kornia", + "name": "batched_squared_norm", + "source_code": "def batched_squared_norm(x: Tensor, keepdim: bool=False) -> Tensor:\n return batched_dot_product(x, x, keepdim)", + "docstring": "Return the squared norm of a vector.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\linalg.py", + "ast_data": "FunctionDef name:batched_squared_norm arg:x arg:keepdim arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "DistributedIteratorV1", + "source_code": "class DistributedIteratorV1(input_lib.DistributedIteratorBase):\n\n @property\n def _initializer(self):\n init_ops = []\n for it in self._iterators:\n init_ops.extend(it.initialize())\n return control_flow_ops.group(init_ops)\n\n @deprecated(None, \"Use the iterator's `initializer` property instead.\")\n def initialize(self):\n return self._initializer\n\n @property\n def initializer(self):\n return self.initialize()\n\n @property\n def output_classes(self):\n return self._iterators[0].output_classes\n\n @property\n def output_shapes(self):\n return self._iterators[0].output_shapes\n\n @property\n def output_types(self):\n return self._iterators[0].output_types\n\n def get_iterator(self, worker):\n for i, w in enumerate(self._input_workers.worker_devices):\n if worker == w:\n return self._iterators[i]\n return None\n\n @property\n def element_spec(self):\n return self._element_spec", + "docstring": "Input Iterator for a distributed dataset.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py", + "ast_data": "ClassDef name:DistributedIteratorV1 FunctionDef name:_initializer arg:self arguments arg Assign For Call Call Return return:yes Call FunctionDef name:initialize arg:self arguments arg Return return:yes Call FunctionDef name:initializer arg:self arguments arg Return return:yes Call FunctionDef name:output_classes arg:self arguments arg Return return:yes FunctionDef name:output_shapes arg:self arguments arg Return return:yes FunctionDef name:output_types arg:self arguments arg Return return:yes FunctionDef name:get_iterator arg:self arg:worker arguments arg arg For Call If Compare Return return:yes Return return:no FunctionDef name:element_spec arg:self arguments arg Return return:yes" + }, + { + "library": "numpy", + "name": "__init__", + "source_code": "def __init__(self, float_conv=float, int_conv=int, float_to_float=float, float_to_str=lambda v: f'{v:24.16e}', title='Python floating point number'):\n with errstate(under='ignore'):\n self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)", + "docstring": "float_conv - convert integer to float (array) int_conv - convert float (array) to integer float_to_float - convert float array to float float_to_str - convert array float to str title - description of used floating point numbers", + "type": "method", + "file_path": "numpy\\numpy\\_core\\_machar.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:float_conv arg:int_conv arg:float_to_float arg:float_to_str arg:title arguments arg arg arg arg arg arg arguments arg With Call Call" + }, + { + "library": "scikit-learn", + "name": "_fit_classifier_calibrator_pair", + "source_code": 
"def _fit_classifier_calibrator_pair(estimator, X, y, train, test, method, classes, sample_weight=None, fit_params=None):\n fit_params_train = _check_method_params(X, params=fit_params, indices=train)\n X_train, y_train = (_safe_indexing(X, train), _safe_indexing(y, train))\n X_test, y_test = (_safe_indexing(X, test), _safe_indexing(y, test))\n estimator.fit(X_train, y_train, **fit_params_train)\n predictions, _ = _get_response_values(estimator, X_test, response_method=['decision_function', 'predict_proba'])\n if predictions.ndim == 1:\n predictions = predictions.reshape(-1, 1)\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X, dtype=predictions.dtype)\n sw_test = _safe_indexing(sample_weight, test)\n else:\n sw_test = None\n calibrated_classifier = _fit_calibrator(estimator, predictions, y_test, classes, method, sample_weight=sw_test)\n return calibrated_classifier", + "docstring": "Fit a classifier/calibration pair on a given train/test split. Fit the classifier on the train set, compute its predictions on the test set and use the predictions as input to fit the calibrator along with the test labels. Parameters ---------- estimator : estimator instance Cloned base estimator. X : array-like, shape (n_samples, n_features) Sample data. y : array-like, shape (n_samples,) Targets. train : ndarray, shape (n_train_indices,) Indices of the training subset. test : ndarray, shape (n_test_indices,) Indices of the testing subset. method : {'sigmoid', 'isotonic'} Method to use for calibration. classes : ndarray, shape (n_classes,) The target classes. sample_weight : array-like, default=None Sample weights for . fit_params : dict, default=None Parameters to pass to the method of the underlying classifier. Returns ------- calibrated_classifier : _CalibratedClassifier instance", + "type": "function", + "file_path": "scikit-learn\\sklearn\\calibration.py", + "ast_data": "FunctionDef name:_fit_classifier_calibrator_pair arg:estimator arg:X arg:y arg:train arg:test arg:method arg:classes arg:sample_weight arg:fit_params arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call Call Assign Call Call Call Assign Call If Compare Assign Call If Compare Assign Call Assign Call Assign Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "contains", + "source_code": "def contains(self, mouseevent):\n if self._different_canvas(mouseevent):\n return (False, {})\n if self._invalidy or self._invalidx:\n self.recache()\n if len(self._xy) == 0:\n return (False, {})\n transformed_path = self._get_transformed_path()\n path, affine = transformed_path.get_transformed_path_and_affine()\n path = affine.transform_path(path)\n xy = path.vertices\n xt = xy[:, 0]\n yt = xy[:, 1]\n fig = self.get_figure(root=True)\n if fig is None:\n _log.warning('no figure set when check if mouse is on line')\n pixels = self._pickradius\n else:\n pixels = fig.dpi / 72.0 * self._pickradius\n with np.errstate(all='ignore'):\n if self._linestyle in ['None', None]:\n ind, = np.nonzero((xt - mouseevent.x) ** 2 + (yt - mouseevent.y) ** 2 <= pixels ** 2)\n else:\n ind = segment_hits(mouseevent.x, mouseevent.y, xt, yt, pixels)\n if self._drawstyle.startswith('steps'):\n ind //= 2\n ind += self.ind_offset\n return (len(ind) > 0, dict(ind=ind))", + "docstring": "Test whether *mouseevent* occurred on the line. 
An event is deemed to have occurred \"on\" the line if it is less than `~.Line2D.get_pickradius~.Line2D.set_pickradius~matplotlib.backend_bases.MouseEvent`, where *pointlist* is a list of points of the line that are within the pickradius around the event position. TODO: sort returned indices by distance", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\lines.py", + "ast_data": "FunctionDef name:contains arg:self arg:mouseevent arguments arg arg If Call Return return:yes If BoolOp Call If Compare Call Return return:yes Assign Call Assign Call Assign Call Assign Assign Assign Assign Call If Compare Call Assign Assign With Call If Compare Assign Call Compare Assign Call If Call Return return:yes Compare Call Call" + }, + { + "library": "pandas", + "name": "isnull", + "source_code": "@doc(NDFrame.isna, klass=_shared_doc_kwargs['klass'])\ndef isnull(self) -> DataFrame:\n return self.isna()", + "docstring": "DataFrame.isnull is an alias for DataFrame.isna.", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:isnull arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "set_force_fallback", + "source_code": "def set_force_fallback(configval: str) -> None:\n torch._C._lazy._set_force_fallback(configval)", + "docstring": "Set the config used to force LTC fallback", + "type": "function", + "file_path": "pytorch\\torch\\_lazy\\config.py", + "ast_data": "FunctionDef name:set_force_fallback arg:configval arguments arg Call" + }, + { + "library": "cherrypy", + "name": "get_resource", + "source_code": "def get_resource(self, path):\n dispatch = self.app.find_config(path, 'request.dispatch', self.dispatch)\n dispatch(path)", + "docstring": "Call a dispatcher (which sets self.handler and .config). (Core)", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cprequest.py", + "ast_data": "FunctionDef name:get_resource arg:self arg:path arguments arg arg Assign Call Call" + }, + { + "library": "tensorflow", + "name": "greater_equal", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef greater_equal(x, y):\n return math_ops.greater_equal(x, y)", + "docstring": "Element-wise truth value of (x >= y). Args: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:greater_equal arg:x arg:y arguments arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "tolist", + "source_code": "def tolist(self):\n _mask = self._mask\n if _mask is nomask:\n return self._data.tolist()\n result = []\n for d, m in zip(self._data, self._mask):\n if m:\n result.append(None)\n else:\n result.append(d.item())\n return tuple(result)", + "docstring": "Transforms the mvoid object into a tuple. Masked fields are replaced by None. 
Returns ------- returned_tuple Tuple of fields", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:tolist arg:self arguments arg Assign If Compare Return return:yes Call Assign For Call If Call Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_any_pandas_objects", + "source_code": "def _any_pandas_objects(terms) -> bool:\n return any((isinstance(term.value, PandasObject) for term in terms))", + "docstring": "Check a sequence of terms for instances of PandasObject.", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\align.py", + "ast_data": "FunctionDef name:_any_pandas_objects arg:terms arguments arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_check_X", + "source_code": "def _check_X(self, X):\n X = super()._check_X(X)\n if self.binarize is not None:\n X = binarize(X, threshold=self.binarize)\n return X", + "docstring": "Validate X, used only in predict* methods.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\naive_bayes.py", + "ast_data": "FunctionDef name:_check_X arg:self arg:X arguments arg arg Assign Call Call If Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "map_only", + "source_code": "def map_only(type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]], /) -> MapOnlyFn[FnAny[Any]]:\n if isinstance(type_or_types_or_pred, (type, tuple)) or (sys.version_info >= (3, 10) and isinstance(type_or_types_or_pred, types.UnionType)):\n\n def pred(x: Any) -> bool:\n return isinstance(x, type_or_types_or_pred)\n elif callable(type_or_types_or_pred):\n pred = type_or_types_or_pred\n else:\n raise TypeError('Argument must be a type, a tuple of types, or a callable.')\n\n def wrapper(func: Callable[[T], Any]) -> Callable[[Any], Any]:\n\n @functools.wraps(func)\n def wrapped(x: T) -> Any:\n if pred(x):\n return func(x)\n return x\n return wrapped\n return wrapper", + "docstring": "Suppose you are writing a tree_map over tensors, leaving everything else unchanged. Ordinarily you would have to write: def go(t): if isinstance(t, Tensor): return ... else: return t With this function, you only need to write: @map_only(Tensor) def go(t): return ... You can also directly use 'tree_map_only'", + "type": "function", + "file_path": "pytorch\\torch\\utils\\_pytree.py", + "ast_data": "FunctionDef name:map_only arguments arg If BoolOp Call BoolOp Compare Call FunctionDef name:pred arg:x arguments arg Return return:yes Call If Call Assign Raise Call FunctionDef name:wrapper arg:func arguments arg FunctionDef name:wrapped arg:x arguments arg If Call Return return:yes Call Return return:yes Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, ctx, config):\n super(AnfTransformer, self).__init__(ctx)\n if config is None:\n if gast_util.GAST2:\n literal_node_types = (gast.Num, gast.Str, gast.Bytes, gast.NameConstant, gast.Name)\n elif gast_util.GAST3:\n literal_node_types = (gast.Constant, gast.Name)\n else:\n assert False\n self._overrides = [(ASTEdgePattern(ANY, ANY, literal_node_types), LEAVE), (ASTEdgePattern(ANY, ANY, gast.expr), REPLACE)]\n else:\n self._overrides = config\n self._gensym = DummyGensym()\n self._pending_statements = []", + "docstring": "Creates an ANF transformer. 
Args: ctx: transformer.Context config: Configuration", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\common_transformers\\anf.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:ctx arg:config arguments arg arg arg Call Call If Compare If Assign If Assign Assign Call Call Assign Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "_benchmarkRunOpPrebuilt", + "source_code": "def _benchmarkRunOpPrebuilt(self, name, target, iters):\n times = []\n with ops.Graph().as_default():\n v = variables.Variable(random_ops.random_normal([]))\n with session.Session(target) as sess:\n sess.run(v.initializer)\n runner = sess.make_callable(v.op)\n runner()\n for _ in range(iters):\n start_time = time.time()\n runner()\n end_time = time.time()\n times.append(end_time - start_time)\n print('%s %f' % (name, np.median(times)))\n self.report_benchmark(iters=1, wall_time=np.median(times), name=name)", + "docstring": "Runs a microbenchmark to measure the cost of running an op. Reports the median cost of running a trivial (Variable) op. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. iters: The number of iterations to perform.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\session_benchmark.py", + "ast_data": "FunctionDef name:_benchmarkRunOpPrebuilt arg:self arg:name arg:target arg:iters arguments arg arg arg arg Assign With Call Call Assign Call Call With Call Call Assign Call Call For Call Assign Call Call Assign Call Call Call Call Call Call" + }, + { + "library": "pandas", + "name": "evaluate", + "source_code": "def evaluate(op, left_op, right_op, use_numexpr: bool=True):\n op_str = _op_str_mapping[op]\n if op_str is not None:\n if use_numexpr:\n return _evaluate(op, op_str, left_op, right_op)\n return _evaluate_standard(op, op_str, left_op, right_op)", + "docstring": "Evaluate and return the expression of the op on left_op and right_op. Parameters ---------- op : the actual operand left_op : left operand right_op : right operand use_numexpr : bool, default True Whether to try to use numexpr.", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\expressions.py", + "ast_data": "FunctionDef name:evaluate arg:op arg:left_op arg:right_op arg:use_numexpr arguments arg arg arg arg Assign If Compare If Return return:yes Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "__call__", + "source_code": "def __call__(self, X, Y=None, eval_gradient=False):\n if Y is not None and eval_gradient:\n raise ValueError('Gradient can only be evaluated when Y is None.')\n if Y is None:\n K = self.noise_level * np.eye(_num_samples(X))\n if eval_gradient:\n if not self.hyperparameter_noise_level.fixed:\n return (K, self.noise_level * np.eye(_num_samples(X))[:, :, np.newaxis])\n else:\n return (K, np.empty((_num_samples(X), _num_samples(X), 0)))\n else:\n return K\n else:\n return np.zeros((_num_samples(X), _num_samples(Y)))", + "docstring": "Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Left argument of the returned kernel k(X, Y) Y : array-like of shape (n_samples_X, n_features) or list of object, default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. 
Only supported when Y is None. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when eval_gradient is True.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg If BoolOp Compare Raise Call If Compare Assign Call Call If If Return return:yes Call Call Return return:yes Call Call Call Return return:yes Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "store", + "source_code": "def store(self, name: str, index: Expr, value: Any, mode: Any=None) -> None:\n self.store_buffer_names.add(name)", + "docstring": "Mock store function for memory planning to optimize allocations properly.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py", + "ast_data": "FunctionDef name:store arg:self arg:name arg:index arg:value arg:mode arguments arg arg arg arg arg Call" + }, + { + "library": "scikit-learn", + "name": "_validate_estimator", + "source_code": "def _validate_estimator(self):\n super()._validate_estimator(default=DecisionTreeRegressor(max_depth=3))", + "docstring": "Check the estimator and set the estimator_ attribute.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py", + "ast_data": "FunctionDef name:_validate_estimator arg:self arguments arg Call Call Call" + }, + { + "library": "scipy", + "name": "Dispatchable", + "source_code": "class Dispatchable:\n\n def __init__(self, value, dispatch_type, coercible=True):\n self.value = value\n self.type = dispatch_type\n self.coercible = coercible\n\n def __getitem__(self, index):\n return (self.type, self.value)[index]\n\n def __str__(self):\n return f'<{type(self).__name__}: type={self.type!r}, value={self.value!r}>'\n __repr__ = __str__", + "docstring": "A utility class which marks an argument with a specific dispatch type. Attributes ---------- value The value of the Dispatchable. type The type of the Dispatchable. Examples -------- >>> x = Dispatchable(1, str) >>> x , value=1> See Also -------- all_of_type Marks all unmarked parameters of a function. 
mark_as Allows one to create a utility function to mark as a given type.", + "type": "class", + "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py", + "ast_data": "ClassDef name:Dispatchable FunctionDef name:__init__ arg:self arg:value arg:dispatch_type arg:coercible arguments arg arg arg arg Assign Assign Assign FunctionDef name:__getitem__ arg:self arg:index arguments arg arg Return return:yes FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Assign" + }, + { + "library": "matplotlib", + "name": "_on_key_release", + "source_code": "def _on_key_release(self, event):\n if not self._selection_completed and (event.key == self._state_modifier_keys.get('move_vertex') or event.key == self._state_modifier_keys.get('move_all')):\n self._xys.append(self._get_data_coords(event))\n self._draw_polygon()\n elif event.key == self._state_modifier_keys.get('clear'):\n event = self._clean_event(event)\n self._xys = [self._get_data_coords(event)]\n self._selection_completed = False\n self._remove_box()\n self.set_visible(True)", + "docstring": "Key release event handler.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:_on_key_release arg:self arg:event arguments arg arg If BoolOp BoolOp Compare Call Compare Call Call Call Call If Compare Call Assign Call Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_connect_jump_to_finally_sections", + "source_code": "def _connect_jump_to_finally_sections(self, node):\n cursor = set((node,))\n if node not in self.finally_sections:\n return cursor\n for guard_section_id in self.finally_sections[node]:\n guard_begin, guard_ends = self.finally_section_subgraphs[guard_section_id]\n self._connect_nodes(cursor, guard_begin)\n cursor = guard_ends\n del self.finally_sections[node]\n return cursor", + "docstring": "Connects a jump node to the finally sections protecting it.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py", + "ast_data": "FunctionDef name:_connect_jump_to_finally_sections arg:self arg:node arguments arg arg Assign Call If Compare Return return:yes For Assign Call Assign Return return:yes" + }, + { + "library": "sphinx", + "name": "get_table_type", + "source_code": "def get_table_type(self) -> str:\n if self.is_longtable():\n return 'longtable'\n elif self.has_verbatim:\n return 'tabular'\n elif self.colspec:\n return 'tabulary'\n elif self.has_problematic or (self.colwidths and 'colwidths-given' in self.classes):\n return 'tabular'\n else:\n return 'tabulary'", + "docstring": "Returns the LaTeX environment name for the table. The class currently supports: * longtable * tabular * tabulary", + "type": "method", + "file_path": "sphinx\\sphinx\\writers\\latex.py", + "ast_data": "FunctionDef name:get_table_type arg:self arguments arg If Call Return return:yes If Return return:yes If Return return:yes If BoolOp BoolOp Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "convert", + "source_code": "def convert(self):\n return super(TFLiteConverterV2, self).convert()", + "docstring": "Converts a TensorFlow GraphDef based on instance variables. Returns: The converted data in serialized format. Raises: ValueError: No concrete function is specified. Multiple concrete functions are specified. Input shape is not specified. 
Invalid quantization parameters.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:convert arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "NormalizedKernelMixin", + "source_code": "class NormalizedKernelMixin:\n\n def diag(self, X):\n return np.ones(X.shape[0])", + "docstring": "Mixin for kernels which are normalized: k(X, X)=1. .. versionadded:: 0.18", + "type": "class", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "ClassDef name:NormalizedKernelMixin FunctionDef name:diag arg:self arg:X arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "clamp", + "source_code": "def clamp(input: Tensor, min_: float, max_: float) -> Tensor:\n if not input.is_quantized:\n raise ValueError(\"Input to 'quantized.clamp' must be quantized!\")\n return torch.clamp(input, min_, max_)", + "docstring": "float(input, min\\_, max\\_) -> Tensor Applies the clamp function element-wise. See :class: for more details. Args: input: quantized input min_: minimum value for clamping max_: maximum value for clamping", + "type": "function", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py", + "ast_data": "FunctionDef name:clamp arg:input arg:min_ arg:max_ arguments arg arg arg If Raise Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "strip_newsgroup_footer", + "source_code": "def strip_newsgroup_footer(text):\n lines = text.strip().split('\\n')\n for line_num in range(len(lines) - 1, -1, -1):\n line = lines[line_num]\n if line.strip().strip('-') == '':\n break\n if line_num > 0:\n return '\\n'.join(lines[:line_num])\n else:\n return text", + "docstring": "Given text in \"news\" format, attempt to remove a signature block. As a rough heuristic, we assume that signatures are set apart by either a blank line or a line made of hyphens, and that it is the last such line in the file (disregarding blank lines at the end). Parameters ---------- text : str The text from which to remove the signature block.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\datasets\\_twenty_newsgroups.py", + "ast_data": "FunctionDef name:strip_newsgroup_footer arg:text arguments arg Assign Call Call For Call Call Assign If Compare Call Call If Compare Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "is_capturing_metal", + "source_code": "def is_capturing_metal() -> bool:\n return torch._C._mps_isCapturing()", + "docstring": "Cheks if metal capture is in progress", + "type": "function", + "file_path": "pytorch\\torch\\mps\\profiler.py", + "ast_data": "FunctionDef name:is_capturing_metal arguments Return return:yes Call" + }, + { + "library": "numpy", + "name": "cumulative_sum", + "source_code": "@array_function_dispatch(_cumulative_sum_dispatcher)\ndef cumulative_sum(x, /, *, axis=None, dtype=None, out=None, include_initial=False):\n return _cumulative_func(x, um.add, axis, dtype, out, include_initial)", + "docstring": "Return the cumulative sum of the elements along a given axis. This function is an Array API compatible alternative to . Parameters ---------- x : array_like Input array. axis : int, optional Axis along which the cumulative sum is computed. The default (None) is only allowed for one-dimensional arrays. 
For arrays with more than one dimension `ufuncs-output-typesum` >>> c = np.array([1, 2e-9, 3e-9] * 1000000) >>> np.cumulative_sum(c)[-1] 1000000.0050045159 >>> c.sum() 1000000.0050000029", + "type": "function", + "file_path": "numpy\\numpy\\_core\\fromnumeric.py", + "ast_data": "FunctionDef name:cumulative_sum arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "seaborn", + "name": "axlabel", + "source_code": "def axlabel(xlabel, ylabel, **kwargs):\n msg = 'This function is deprecated and will be removed in a future version'\n warnings.warn(msg, FutureWarning)\n ax = plt.gca()\n ax.set_xlabel(xlabel, **kwargs)\n ax.set_ylabel(ylabel, **kwargs)", + "docstring": "Grab current axis and label it. DEPRECATED: will be removed in a future version.", + "type": "function", + "file_path": "seaborn\\seaborn\\utils.py", + "ast_data": "FunctionDef name:axlabel arg:xlabel arg:ylabel arguments arg arg arg Assign Call Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "get_rng_state", + "source_code": "def get_rng_state() -> torch.Tensor:\n return default_generator.get_state()", + "docstring": "Returns the random number generator state as a . .. note:: The returned state is for the default generator on CPU only. See also: :func:.", + "type": "function", + "file_path": "pytorch\\torch\\random.py", + "ast_data": "FunctionDef name:get_rng_state arguments Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_register_test", + "source_code": "def _register_test(*test_metainfo):\n BENCHMARK_TESTER.append(test_metainfo)", + "docstring": "save the metainfo needed to create a test. Currently test_metainfo takes two different inputs: 1) This input when adds single op to the benchmark _register_test(configs, pt_bench_op, create_pytorch_op_test_case, run_backward=True) 2) This input when adds a list of ops to the benchmark _register_test(configs, pt_bench_op, create_pytorch_op_test_case, run_backward=False, op_name_function=op)", + "type": "function", + "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_core.py", + "ast_data": "FunctionDef name:_register_test arguments arg Call" + }, + { + "library": "django", + "name": "decorator_from_middleware", + "source_code": "def decorator_from_middleware(middleware_class):\n return make_middleware_decorator(middleware_class)()", + "docstring": "Given a middleware class (not an instance), return a view decorator. This lets you use middleware functionality on a per-view basis. 
The middleware is created with no params passed.", + "type": "function", + "file_path": "django\\django\\utils\\decorators.py", + "ast_data": "FunctionDef name:decorator_from_middleware arg:middleware_class arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "memory_stats_as_nested_dict", + "source_code": "def memory_stats_as_nested_dict(device: _device_t=None) -> dict[str, Any]:\n if not is_initialized():\n return {}\n device = _get_device_index(device, optional=True)\n return torch._C._xpu_memoryStats(device)", + "docstring": "Return the result of :func: as a nested dictionary.", + "type": "function", + "file_path": "pytorch\\torch\\xpu\\memory.py", + "ast_data": "FunctionDef name:memory_stats_as_nested_dict arg:device arguments arg If Call Return return:no Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "DataProcessorTemplateWrapper", + "source_code": "class DataProcessorTemplateWrapper:\n\n def __init__(self, wrapped_template_cls, preprocessor, postprocessor, **kwargs) -> None:\n if preprocessor is not None:\n self._preprocessor = preprocessor\n else:\n self._preprocessor = lambda x, y: (x, y)\n if postprocessor is not None:\n self._postprocessor = postprocessor\n else:\n self._postprocessor = lambda x: x\n assert 'input_nodes' in kwargs\n assert 'layout' in kwargs\n kwargs['input_nodes'], kwargs['layout'] = preprocessor(kwargs['input_nodes'], kwargs['layout'])\n self._wrapped = wrapped_template_cls(**kwargs)\n\n def __getattr__(self, name):\n return getattr(self._wrapped, name)\n\n def maybe_append_choice(self, choices, **kwargs):\n return type(self._wrapped).maybe_append_choice(self, choices, **kwargs)\n\n def generate(self, **kwargs):\n choice_caller = self._wrapped.generate(**kwargs)\n return DataProcessorChoiceCallerWrapper(choice_caller, self._preprocessor, self._postprocessor)\n\n def __repr__(self) -> str:\n return f'DataProcessorTemplateWrapper({self._wrapped})'", + "docstring": "A wrapper class for a kernel template. This class together with provides a convenient way to preprocess and postprocess data before and after using the wrapped template. A typical usage is to reorder or filter the input nodes in order to match the expected input of other kernel choices like a ATen kernel. A more complicated usage is to prepack the weights. 
See the example from :mod: for more details.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py", + "ast_data": "ClassDef name:DataProcessorTemplateWrapper FunctionDef name:__init__ arg:self arg:wrapped_template_cls arg:preprocessor arg:postprocessor arguments arg arg arg arg arg If Compare Assign Assign arguments arg arg If Compare Assign Assign arguments arg Compare Compare Assign Call Assign Call FunctionDef name:__getattr__ arg:self arg:name arguments arg arg Return return:yes Call FunctionDef name:maybe_append_choice arg:self arg:choices arguments arg arg arg Return return:yes Call Call FunctionDef name:generate arg:self arguments arg arg Assign Call Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "Identity", + "source_code": "class Identity(BasePruningMethod):\n PRUNING_TYPE = 'unstructured'\n\n def compute_mask(self, t, default_mask):\n mask = default_mask\n return mask\n\n @classmethod\n def apply(cls, module, name):\n return super().apply(module, name)", + "docstring": "Utility pruning method that does not prune any units but generates the pruning parametrization with a mask of ones.", + "type": "class", + "file_path": "pytorch\\torch\\nn\\utils\\prune.py", + "ast_data": "ClassDef name:Identity Assign FunctionDef name:compute_mask arg:self arg:t arg:default_mask arguments arg arg arg Assign Return return:yes FunctionDef name:apply arg:cls arg:module arg:name arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_current_replica_id_as_int", + "source_code": "def get_current_replica_id_as_int():\n replica_context = distribute_lib.get_replica_context()\n if replica_context:\n replica_id = replica_context._replica_id\n if not isinstance(replica_id, int):\n replica_id = tensor_util.constant_value(replica_id)\n else:\n replica_id = distribute_lib.get_update_replica_id()\n return replica_id", + "docstring": "Returns the current replica ID as an integer, or .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py", + "ast_data": "FunctionDef name:get_current_replica_id_as_int arguments Assign Call If Assign If Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "predict_proba", + "source_code": "@available_if(_search_estimator_has('predict_proba'))\ndef predict_proba(self, X):\n check_is_fitted(self)\n return self.best_estimator_.predict_proba(X)", + "docstring": "Call predict_proba on the estimator with the best found parameters. 
Only available if `Xclasses_`.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py", + "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "get_distinct", + "source_code": "def get_distinct(self):\n result = []\n params = []\n opts = self.query.get_meta()\n for name in self.query.distinct_fields:\n parts = name.split(LOOKUP_SEP)\n _, targets, alias, joins, path, _, transform_function = self._setup_joins(parts, opts, None)\n targets, alias, _ = self.query.trim_joins(targets, joins, path)\n for target in targets:\n if name in self.query.annotation_select:\n result.append(self.connection.ops.quote_name(name))\n else:\n r, p = self.compile(transform_function(target, alias))\n result.append(r)\n params.append(p)\n return (result, params)", + "docstring": "Return a quoted list of fields to use in DISTINCT ON part of the query. This method can alter the tables in the query, and thus it must be called before get_from_clause().", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\compiler.py", + "ast_data": "FunctionDef name:get_distinct arg:self arguments arg Assign Assign Assign Call For Assign Call Assign Call Assign Call For If Compare Call Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "Definition", + "source_code": "class Definition(object):\n\n def __init__(self):\n self.param_of = None\n self.directives = {}\n\n def __repr__(self):\n return '%s[%d]' % (self.__class__.__name__, id(self))", + "docstring": "Definition objects describe a unique definition of a variable. Subclasses of this may be used by passing an appropriate factory function to resolve. 
Attributes: param_of: Optional[ast.AST] directives: Dict, optional definition annotations", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\reaching_definitions.py", + "ast_data": "ClassDef name:Definition FunctionDef name:__init__ arg:self arguments arg Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "create_unbacked_symint", + "source_code": "@record_shapeenv_event()\ndef create_unbacked_symint(self, source: Optional[Source]=None) -> SymInt:\n symbol: sympy.Symbol = make_symbol(SymT.UNBACKED_INT, next(self.unbacked_symint_counter), integer=True)\n if not self._ignore_fresh_unbacked_symbols_tls():\n self.pending_fresh_unbacked_symbols.append(symbol)\n self.counter['create_unbacked_symbol'] += 1\n self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)\n vr = self.var_to_range[symbol] = self._default_unspecified_value_range()\n assert vr.is_int\n sloc = self._get_sloc()\n self.var_to_range_sloc[symbol] = ValueRangesSLoc(sloc, sloc)\n fx_node = self._create_fx_placeholder_and_z3var(symbol, int)\n sym_node = SymNode(symbol, self, int, None, fx_node=fx_node)\n self._log_create_unbacked_symbol('create_unbacked_symint', symbol, vr, source, sym_node=sym_node)\n return SymInt(sym_node)", + "docstring": "Create a symbolic integer without a hint value", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:create_unbacked_symint arg:self arg:source arguments arg arg Call Call If Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "get_previous_month", + "source_code": "def get_previous_month(self, date):\n return _get_next_prev(self, date, is_previous=True, period='month')", + "docstring": "Get the previous valid month.", + "type": "method", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "FunctionDef name:get_previous_month arg:self arg:date arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "experimental_as_proto", + "source_code": "def experimental_as_proto(self) -> struct_pb2.TypeSpecProto:\n return nested_structure_coder.encode_structure(self).type_spec_value", + "docstring": "Returns a proto representation of the TypeSpec instance. Do NOT override for custom non-TF types.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", + "ast_data": "FunctionDef name:experimental_as_proto arg:self arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "__radd__", + "source_code": "def __radd__(self, other):\n return add(other, self)", + "docstring": "Add other to self, and return a new masked array.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__radd__ arg:self arg:other arguments arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_assign_where", + "source_code": "def _assign_where(X1, X2, cond):\n if hasattr(X1, 'mask'):\n X1.mask(cond=cond, other=X2, inplace=True)\n else:\n X1[cond] = X2[cond]", + "docstring": "Assign X2 to X1 where cond is True. Parameters ---------- X1 : ndarray or dataframe of shape (n_samples, n_features) Data. X2 : ndarray of shape (n_samples, n_features) Data to be assigned. 
cond : ndarray of shape (n_samples, n_features) Boolean mask to assign data.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\impute\\_iterative.py", + "ast_data": "FunctionDef name:_assign_where arg:X1 arg:X2 arg:cond arguments arg arg arg If Call Call Assign" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n values = self.decision_function(X)\n is_inlier = np.full(values.shape[0], -1, dtype=int)\n is_inlier[values >= 0] = 1\n return is_inlier", + "docstring": "Predict labels (1 inlier, -1 outlier) of X according to fitted model. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- is_inlier : ndarray of shape (n_samples,) Returns -1 for anomalies/outliers and +1 for inliers.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\covariance\\_elliptic_envelope.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Assign Call Assign Compare Return return:yes" + }, + { + "library": "pandas", + "name": "_add_timedeltalike_scalar", + "source_code": "def _add_timedeltalike_scalar(self, other):\n if not isinstance(self.freq, Tick):\n raise raise_on_incompatible(self, other)\n if isna(other):\n return super()._add_timedeltalike_scalar(other)\n td = np.asarray(Timedelta(other).asm8)\n return self._add_timedelta_arraylike(td)", + "docstring": "Parameters ---------- other : timedelta, Tick, np.timedelta64 Returns ------- PeriodArray", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\period.py", + "ast_data": "FunctionDef name:_add_timedeltalike_scalar arg:self arg:other arguments arg arg If Call Raise Call If Call Return return:yes Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "converted_enclosing_graph", + "source_code": "@property\ndef converted_enclosing_graph(self):\n return self._enclosing_graph.converted_self()", + "docstring": "The graph being converted.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", + "ast_data": "FunctionDef name:converted_enclosing_graph arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_custom_axis_name", + "source_code": "def _get_custom_axis_name(axis: Dim | str) -> str:\n if isinstance(axis, Dim):\n return axis.__name__\n return axis", + "docstring": "Get the custom axis name from a torch.export.Dim.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_dynamic_shapes.py", + "ast_data": "FunctionDef name:_get_custom_axis_name arg:axis arguments arg If Call Return return:yes Return return:yes" + }, + { + "library": "cherrypy", + "name": "gid", + "source_code": "@property\ndef gid(self):\n return self._gid", + "docstring": "The gid under which to run. 
Availability: Unix.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\plugins.py", + "ast_data": "FunctionDef name:gid arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_calc___package__", + "source_code": "def _calc___package__(globals):\n package = globals.get('__package__')\n spec = globals.get('__spec__')\n if package is not None:\n if spec is not None and package != spec.parent:\n _warnings.warn(f'__package__ != __spec__.parent ({package!r} != {spec.parent!r})', ImportWarning, stacklevel=3)\n return package\n elif spec is not None:\n return spec.parent\n else:\n _warnings.warn(\"can't resolve package from __spec__ or __package__, falling back on __name__ and __path__\", ImportWarning, stacklevel=3)\n package = globals['__name__']\n if '__path__' not in globals:\n package = package.rpartition('.')[0]\n return package", + "docstring": "Calculate what __package__ should be. __package__ is not guaranteed to be defined or could be set to None to represent that its proper value is unknown.", + "type": "function", + "file_path": "pytorch\\torch\\package\\_importlib.py", + "ast_data": "FunctionDef name:_calc___package__ arg:globals arguments arg Assign Call Assign Call If Compare If BoolOp Compare Compare Call Return return:yes If Compare Return return:yes Call Assign If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_device_ids", + "source_code": "def get_device_ids(mesh: layout_lib.Mesh, client_id: Optional[int]=None) -> List[int]:\n if mesh.device_type() != _TPU_DEVICE_TYPE:\n raise ValueError('The mesh must be a TPU mesh')\n if client_id is None or client_id == config.client_id():\n return mesh.local_device_ids()\n raise NotImplementedError(\"Looking up other clients' device IDs is not supported\")", + "docstring": "Returns the device IDs of all TPU cores local to the given client. A device ID is a non-negative integer that uniquely identifies a device in the mesh. For example, for a 2x2 mesh ('x', 'y'), this function returns a permutation of [0, 1, 2, 3]. Note that device IDs and device locations are equivalent. The former is a linearization of the latter along mesh dimensions. Args: mesh: A TPU mesh. client_id: Optional; A DTensor client ID. 
If empty, query this client.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\tpu_util.py", + "ast_data": "FunctionDef name:get_device_ids arg:mesh arg:client_id arguments arg arg If Compare Call Raise Call If BoolOp Compare Compare Call Return return:yes Call Raise Call" + }, + { + "library": "tensorflow", + "name": "_is_valid_predict_signature", + "source_code": "def _is_valid_predict_signature(signature_def):\n if signature_def.method_name != signature_constants.PREDICT_METHOD_NAME:\n return False\n if not signature_def.inputs.keys():\n return False\n if not signature_def.outputs.keys():\n return False\n return True", + "docstring": "Determine whether the argument is a servable 'predict' SignatureDef.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_def_utils_impl.py", + "ast_data": "FunctionDef name:_is_valid_predict_signature arg:signature_def arguments arg If Compare Return return:yes If Call Return return:yes If Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "GlobalContext", + "source_code": "class GlobalContext(Checkpointable[GlobalContextCheckpointState]):\n _supported_global_states = {'grad_enabled', 'autocast_enabled', 'autocast_cpu_enabled', 'autocast_gpu_dtype', 'autocast_cpu_dtype', 'autocast_cache_enabled'}\n\n def __init__(self) -> None:\n self.global_state: dict[str, tuple[Callable, ...]] = {}\n\n def copy_graphstate(self):\n return GlobalContextCheckpointState(dict(self.global_state))\n\n def restore_graphstate(self, state):\n assert isinstance(state, GlobalContextCheckpointState)\n self.global_state = state.global_state\n assert len(self.global_state) == len(self._supported_global_states) and set(self.global_state.keys()) == self._supported_global_states, 'Global state mismatch'\n for func, args in self.global_state.values():\n func(args)", + "docstring": "This keeps track of the global torch state during tracing of a function. For example, torch.is_grad_enabled.", + "type": "class", + "file_path": "pytorch\\torch\\_guards.py", + "ast_data": "ClassDef name:GlobalContext Assign FunctionDef name:__init__ arg:self arguments arg FunctionDef name:copy_graphstate arg:self arguments arg Return return:yes Call Call FunctionDef name:restore_graphstate arg:self arg:state arguments arg arg Call Assign BoolOp Compare Call Call Compare Call Call For Call Call" + }, + { + "library": "tensorflow", + "name": "sparse_to_dense", + "source_code": "@tf_export(v1=['sparse_to_dense'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated(None, 'Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.')\ndef sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value=0, validate_indices=True, name=None):\n return gen_sparse_ops.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value=default_value, validate_indices=validate_indices, name=name)", + "docstring": "Converts a sparse representation into a dense tensor. Builds an array with shape such that All other values in are set to . If is a scalar, all sparse indices are set to this single value. Indices should be sorted in lexicographic order, and indices must not contain any repeats. If is True, these properties are checked during execution. Args: sparse_indices: A 0-D, 1-D, or 2-D of type or . contains the complete index where will be placed. output_shape: A 1-D of the same type as . Shape of the dense output tensor. sparse_values: A 0-D or 1-D . 
Values corresponding to each row of , or a scalar value to be used for all sparse indices. default_value: A 0-D of the same type as . Value to set for indices not specified in . Defaults to zero. validate_indices: A boolean value. If True, indices are checked to make sure they are sorted in lexicographic order and that there are no repeats. name: A name for the operation (optional). Returns: Dense of shape . Has the same type as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", + "ast_data": "FunctionDef name:sparse_to_dense arg:sparse_indices arg:output_shape arg:sparse_values arg:default_value arg:validate_indices arg:name arguments arg arg arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_parse_single_sequence_example_raw", + "source_code": "def _parse_single_sequence_example_raw(serialized, context, feature_list, debug_name, name=None):\n with ops.name_scope(name, 'ParseSingleExample', [serialized, debug_name]):\n serialized = ops.convert_to_tensor(serialized, name='serialized')\n serialized = _assert_scalar(serialized, 'serialized')\n return _parse_sequence_example_raw(serialized, debug_name, context, feature_list, name)[:2]", + "docstring": "Parses a single proto. Args: serialized: A scalar (0-D Tensor) of type string, a single binary serialized proto. context: A containing the parameters for the parse op for the context features. feature_list: A containing the parameters for the parse op for the feature_list features. debug_name: A scalar (0-D Tensor) of strings (optional), the name of the serialized proto. name: A name for this operation (optional). Returns: A tuple of two s, each mapping keys to s and s. The first dict contains the context key/values. The second dict contains the feature_list key/values. 
Raises: TypeError: if feature_list.dense_defaults is not either None or a dict.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_ops.py", + "ast_data": "FunctionDef name:_parse_single_sequence_example_raw arg:serialized arg:context arg:feature_list arg:debug_name arg:name arguments arg arg arg arg arg With Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "replace_as_expression", + "source_code": "def replace_as_expression(template, **replacements):\n replacement = replace(template, **replacements)\n if len(replacement) != 1:\n raise ValueError('single expression expected; for more general templates use replace')\n node, = replacement\n if isinstance(node, gast.Expr):\n return node.value\n elif isinstance(node, gast.Name):\n return node\n raise ValueError('the template is expected to generate an expression or a name node; instead found %s' % node)", + "docstring": "Variant of replace that generates expressions, instead of code blocks.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\templates.py", + "ast_data": "FunctionDef name:replace_as_expression arg:template arguments arg arg Assign Call If Compare Call Raise Call Assign If Call Return return:yes If Call Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "_enclosing_power_of_two", + "source_code": "def _enclosing_power_of_two(value):\n value_static = tensor_util.constant_value(value)\n if value_static is not None:\n return constant_op.constant(int(2 ** np.ceil(np.log(value_static) / np.log(2.0))), value.dtype)\n return math_ops.cast(math_ops.pow(2.0, math_ops.ceil(math_ops.log(math_ops.cast(value, dtypes.float32)) / math_ops.log(2.0))), value.dtype)", + "docstring": "Return 2**N for integer N such that 2**N >= value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\spectral_ops.py", + "ast_data": "FunctionDef name:_enclosing_power_of_two arg:value arguments arg Assign Call If Compare Return return:yes Call Call Call Call Call Return return:yes Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "embedding_tables", + "source_code": "@property\ndef embedding_tables(self):\n raise NotImplementedError", + "docstring": "Returns a dict of embedding tables, keyed by .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_base.py", + "ast_data": "FunctionDef name:embedding_tables arg:self arguments arg Raise" + }, + { + "library": "pytorch", + "name": "_annotate_conv_bn", + "source_code": "@register_annotator('conv_bn')\ndef _annotate_conv_bn(gm: torch.fx.GraphModule, quantization_config: Optional[QuantizationConfig], filter_fn: Optional[Callable[[Node], bool]]=None) -> Optional[list[list[Node]]]:\n return _do_annotate_conv_bn(gm, quantization_config, filter_fn, has_relu=False)", + "docstring": "Find conv + batchnorm parititions Note: This is only used for QAT. In PTQ, batchnorm should already be fused into the conv.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xnnpack_quantizer_utils.py", + "ast_data": "FunctionDef name:_annotate_conv_bn arg:gm arg:quantization_config arg:filter_fn arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "minposx", + "source_code": "@property\ndef minposx(self):\n return self._minpos[0]", + "docstring": "The minimum positive value in the *x*-direction within the Bbox. 
This is useful when dealing with logarithmic scales and other scales where negative bounds result in floating point errors, and will be used as the minimum *x*-extent instead of *x0*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:minposx arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "IsLoopExit", + "source_code": "def IsLoopExit(op):\n return op.type == 'Exit' or op.type == 'RefExit'", + "docstring": "Return true if is an Exit.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py", + "ast_data": "FunctionDef name:IsLoopExit arg:op arguments arg Return return:yes BoolOp Compare Compare" + }, + { + "library": "scikit-learn", + "name": "_interval_max_min_ratio", + "source_code": "def _interval_max_min_ratio(data):\n diff = np.diff(np.sort(data))\n return diff.max() / diff.min()", + "docstring": "Compute the ratio between the largest and smallest inter-point distances. A value larger than 5 typically indicates that the parameter range would better be displayed with a log scale while a linear scale would be more suitable otherwise.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_plotting.py", + "ast_data": "FunctionDef name:_interval_max_min_ratio arg:data arguments arg Assign Call Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "set_params", + "source_code": "def set_params(self, nbins=None):\n if nbins is not None:\n self.nbins = nbins", + "docstring": "Set parameters within this locator.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:set_params arg:self arg:nbins arguments arg arg If Compare Assign" + }, + { + "library": "kornia", + "name": "__init__", + "source_code": "def __init__(self, z: Tensor) -> None:\n super().__init__()\n KORNIA_CHECK_IS_TENSOR(z)\n check_so2_z_shape(z)\n self._z = Parameter(z)", + "docstring": "Construct the base class. Internally represented by complex number . Args: z: Complex number with the shape of :math: or :math:. 
Example: >>> real = torch.tensor(1.0) >>> imag = torch.tensor(2.0) >>> So2(torch.complex(real, imag)).z Parameter containing: tensor(1.+2.j, requires_grad=True)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:z arguments arg arg Call Call Call Call Assign Call" + }, + { + "library": "matplotlib", + "name": "is_pull_request", + "source_code": "def is_pull_request(issue):\n return bool(issue.get('pull_request', {}).get('html_url', None))", + "docstring": "Return True if the given issue is a pull request.", + "type": "function", + "file_path": "matplotlib\\tools\\gh_api.py", + "ast_data": "FunctionDef name:is_pull_request arg:issue arguments arg Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "flatten_choices", + "source_code": "def flatten_choices(choices):\n for value_or_group, label_or_nested in choices or ():\n if isinstance(label_or_nested, (list, tuple)):\n yield from label_or_nested\n else:\n yield (value_or_group, label_or_nested)", + "docstring": "Flatten choices by removing nested values.", + "type": "function", + "file_path": "django\\django\\utils\\choices.py", + "ast_data": "FunctionDef name:flatten_choices arg:choices arguments arg For BoolOp If Call" + }, + { + "library": "cherrypy", + "name": "other", + "source_code": "@cherrypy.expose\ndef other(self, a=2, b='bananas', c=None):\n cherrypy.response.headers['Content-Type'] = 'text/plain'\n if c is None:\n return 'Have %d %s.' % (int(a), b)\n else:\n return 'Have %d %s, %s.' % (int(a), b, c)", + "docstring": "Render number of fruits based on third argument.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\scaffold\\__init__.py", + "ast_data": "FunctionDef name:other arg:self arg:a arg:b arg:c arguments arg arg arg arg Assign If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "transform_tensor", + "source_code": "def transform_tensor(self, input: Tensor, *, shape: Optional[Tensor]=None, match_channel: bool=True) -> Tensor:\n _validate_input_dtype(input, accepted_dtypes=[float16, float32, float64])\n if shape is None:\n return _transform_input(input)\n else:\n return _transform_input_by_shape(input, reference_shape=shape, match_channel=match_channel)", + "docstring": "Convert any incoming (H, W), (C, H, W) and (B, C, H, W) into (B, C, H, W).", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\_2d\\base.py", + "ast_data": "FunctionDef name:transform_tensor arg:self arg:input arguments arg arg arg arg Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "_fd", + "source_code": "def _fd(f):\n return f.fileno() if hasattr(f, 'fileno') else f", + "docstring": "Get a filedescriptor from something which could be a file or an fd.", + "type": "function", + "file_path": "django\\django\\core\\files\\locks.py", + "ast_data": "FunctionDef name:_fd arg:f arguments arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_setdiag", + "source_code": "def _setdiag(self, values, k):\n M, N = self.shape\n if k < 0:\n if values.ndim == 0:\n max_index = min(M + k, N)\n for i in range(max_index):\n self[i - k, i] = values\n else:\n max_index = min(M + k, N, len(values))\n if max_index <= 0:\n return\n for i, v in enumerate(values[:max_index]):\n self[i - k, i] = v\n elif values.ndim == 0:\n max_index = min(M, N - k)\n for i in range(max_index):\n self[i, i + k] = values\n else:\n max_index = 
min(M, N - k, len(values))\n if max_index <= 0:\n return\n for i, v in enumerate(values[:max_index]):\n self[i, i + k] = v", + "docstring": "This part of the implementation gets overridden by the different formats.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_base.py", + "ast_data": "FunctionDef name:_setdiag arg:self arg:values arg:k arguments arg arg arg Assign If Compare If Compare Assign Call For Call Assign Assign Call Call If Compare Return return:no For Call Assign If Compare Assign Call For Call Assign Assign Call Call If Compare Return return:no For Call Assign" + }, + { + "library": "tensorflow", + "name": "_BesselI1Grad", + "source_code": "@ops.RegisterGradient('BesselI1')\ndef _BesselI1Grad(op: ops.Operation, grad):\n x = op.inputs[0]\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n dy_dx = array_ops.where_v2(math_ops.equal(x, 0.0), math_ops.cast(1.0, x.dtype), special_math_ops.bessel_i0(x) - math_ops.div(y, x))\n return grad * dy_dx", + "docstring": "Compute gradient of bessel_i1(x) with respect to its argument.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_BesselI1Grad arg:op arg:grad arguments arg arg Assign Assign With Call Assign Call Call Call Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "score_samples", + "source_code": "def score_samples(self, X):\n check_is_fitted(self)\n X = validate_data(self, X, reset=False)\n return logsumexp(self._estimate_weighted_log_prob(X), axis=1)", + "docstring": "Compute the log-likelihood of each sample. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- log_prob : array, shape (n_samples,) Log-likelihood of each sample in under the current model.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\mixture\\_base.py", + "ast_data": "FunctionDef name:score_samples arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "log10", + "source_code": "@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef log10(x):\n x = _fix_real_lt_zero(x)\n return nx.log10(x)", + "docstring": "Compute the logarithm base 10 of . Return the \"principal value\" (for a description of this, see ) of :math:. 
For real `x > 0`, this is a real number (`log10(0)` returns `-inf` and `log10(np.inf)` returns `inf`). Otherwise, the complex principle value is returned. Parameters ---------- x : array_like or scalar The value(s) whose log base 10 is (are) required. Returns ------- out : ndarray or scalar The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`, otherwise an array object is returned. See Also -------- numpy.log10 Examples -------- >>> import numpy as np (We set the printing precision so the example can be auto-tested) >>> np.set_printoptions(precision=4) >>> np.emath.log10(10**1) 1.0 >>> np.emath.log10([-10**1, -10**2, 10**2]) array([1.+1.3644j, 2.+1.3644j, 2.+0.j ])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_scimath_impl.py", + "ast_data": "FunctionDef name:log10 arg:x arguments arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "remove_zeros", + "source_code": "def remove_zeros(split_sections: list[int]):\n new_split_sections, index_mapping = ([], {})\n idx = 0\n for i in range(len(split_sections)):\n if split_sections[i] > 0:\n new_split_sections.append(split_sections[i])\n index_mapping[i] = idx\n idx += 1\n return (new_split_sections, index_mapping)", + "docstring": "Remove zeros from the list and get the index mapping dict from getitem in split node to getitem in new split node", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\split_cat.py", + "ast_data": "FunctionDef name:remove_zeros arg:split_sections arguments arg Assign Assign For Call Call If Compare Call Assign Return return:yes" + }, + { + "library": "django", + "name": "TableColumns", + "source_code": "class TableColumns(Table):\n\n def __init__(self, table, columns):\n self.table = table\n self.columns = columns\n\n def references_column(self, table, column):\n return self.table == table and column in self.columns\n\n def rename_column_references(self, table, old_column, new_column):\n if self.table == table:\n for index, column in enumerate(self.columns):\n if column == old_column:\n self.columns[index] = new_column", + "docstring": "Base class for references to multiple columns of a table.", + "type": "class", + "file_path": "django\\django\\db\\backends\\ddl_references.py", + "ast_data": "ClassDef name:TableColumns FunctionDef name:__init__ arg:self arg:table arg:columns arguments arg arg arg Assign Assign FunctionDef name:references_column arg:self arg:table arg:column arguments arg arg arg Return return:yes BoolOp Compare Compare FunctionDef name:rename_column_references arg:self arg:table arg:old_column arg:new_column arguments arg arg arg arg If Compare For Call If Compare Assign" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, func: T) -> T:\n api_names_attr = API_ATTRS[self._api_name].names\n api_names_attr_v1 = API_ATTRS_V1[self._api_name].names\n _, undecorated_func = tf_decorator.unwrap(func)\n self.set_attr(undecorated_func, api_names_attr, self._names)\n self.set_attr(undecorated_func, api_names_attr_v1, self._names_v1)\n for name in self._names:\n _NAME_TO_SYMBOL_MAPPING[name] = func\n for name_v1 in self._names_v1:\n _NAME_TO_SYMBOL_MAPPING['compat.v1.%s' % name_v1] = func\n return func", + "docstring": "Calls this decorator. Args: func: decorated symbol (function or class). 
Returns: The input function with _tf_api_names attribute set.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_export.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:func arguments arg arg Assign Assign Assign Call Call Call For Assign For Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "scatter_update", + "source_code": "@tf_export(v1=['scatter_update'])\ndef scatter_update(ref, indices, updates, use_locking=True, name=None):\n if ref.dtype._is_ref_dtype:\n return gen_state_ops.scatter_update(ref, indices, updates, use_locking=use_locking, name=name)\n return ref._lazy_read(gen_resource_variable_ops.resource_scatter_update(ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name))", + "docstring": "Applies sparse updates to a variable reference. This operation computes This operation outputs after the update is done. This makes it easier to chain operations that need to use the reset value. If values in is to be updated more than once, because there are duplicate entries in , the order at which the updates happen for each value is undefined. Requires . Args: ref: A . indices: A . Must be one of the following types: , . A tensor of indices into the first dimension of . updates: A . Must have the same type as . A tensor of updated values to store in . use_locking: An optional . Defaults to . If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: Same as . Returned as a convenience for operations that want to use the updated values after the update is done.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py", + "ast_data": "FunctionDef name:scatter_update arg:ref arg:indices arg:updates arg:use_locking arg:name arguments arg arg arg arg arg If Return return:yes Call Return return:yes Call Call Call Call" + }, + { + "library": "pandas", + "name": "extract_pandas_array", + "source_code": "def extract_pandas_array(values: ArrayLike, dtype: DtypeObj | None, ndim: int) -> tuple[ArrayLike, DtypeObj | None]:\n if isinstance(values, ABCNumpyExtensionArray):\n values = values.to_numpy()\n if ndim and ndim > 1:\n values = np.atleast_2d(values)\n if isinstance(dtype, NumpyEADtype):\n dtype = dtype.numpy_dtype\n return (values, dtype)", + "docstring": "Ensure that we don't allow NumpyExtensionArray / NumpyEADtype in internals.", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "FunctionDef name:extract_pandas_array arg:values arg:dtype arg:ndim arguments arg arg arg If Call Assign Call If BoolOp Compare Assign Call If Call Assign Return return:yes" + }, + { + "library": "django", + "name": "get_default_columns", + "source_code": "def get_default_columns(self, select_mask, start_alias=None, opts=None, from_parent=None):\n result = []\n if opts is None:\n if (opts := self.query.get_meta()) is None:\n return result\n start_alias = start_alias or self.query.get_initial_alias()\n seen_models = {None: start_alias}\n select_mask_fields = set(composite.unnest(select_mask))\n for field in opts.concrete_fields:\n model = field.model._meta.concrete_model\n if model == opts.model:\n model = None\n if from_parent and model is not None and issubclass(from_parent._meta.concrete_model, model._meta.concrete_model):\n continue\n if select_mask and field not in select_mask_fields:\n continue\n alias = self.query.join_parent_model(opts, 
model, start_alias, seen_models)\n column = field.get_col(alias)\n result.append(column)\n return result", + "docstring": "Compute the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case \"opts\" and \"start_alias\" will be given to provide a starting point for the traversal. Return a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, return a list of (alias, col_name) pairs instead of strings as the first component and None as the second component).", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\compiler.py", + "ast_data": "FunctionDef name:get_default_columns arg:self arg:select_mask arg:start_alias arg:opts arg:from_parent arguments arg arg arg arg arg Assign If Compare If Compare Call Return return:yes Assign BoolOp Call Assign Assign Call Call For Assign If Compare Assign If BoolOp Compare Call If BoolOp Compare Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "enter_except_section", + "source_code": "def enter_except_section(self, section_id):\n if section_id in self.raises:\n self.leaves.update(self.raises[section_id])", + "docstring": "Enters an except section.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py", + "ast_data": "FunctionDef name:enter_except_section arg:self arg:section_id arguments arg arg If Compare Call" + }, + { + "library": "tensorflow", + "name": "check_image_file_header", + "source_code": "def check_image_file_header(filename):\n with tf.io.gfile.Gfile(filename, 'rb') as f:\n magic = read32(f)\n read32(f)\n rows = read32(f)\n cols = read32(f)\n if magic != 2051:\n raise ValueError('Invalid magic number %d in MNIST file %s' % (magic, f.name))\n if rows != 28 or cols != 28:\n raise ValueError('Invalid MNIST file %s: Expected 28x28 images, found %dx%d' % (f.name, rows, cols))", + "docstring": "Validate that filename corresponds to images for the MNIST dataset.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\tutorials\\dataset.py", + "ast_data": "FunctionDef name:check_image_file_header arg:filename arguments arg With Call Assign Call Call Assign Call Assign Call If Compare Raise Call If BoolOp Compare Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "_tf_distributed_iterable_for_stmt", + "source_code": "def _tf_distributed_iterable_for_stmt(iter_, extra_test, body, get_state, set_state, symbol_names, opts):\n if extra_test is not None:\n raise NotImplementedError('break and return statements are not yet supported in for ... 
in distributed input loops.')\n init_vars = get_state()\n verify_loop_init_vars(init_vars, symbol_names)\n if 'shape_invariants' in opts:\n opts['shape_invariants'] = _shape_invariants_mapping_to_positional_list(opts['shape_invariants'], init_vars)\n\n def reduce_body(loop_vars, iterate):\n set_state(loop_vars)\n body(iterate)\n new_loop_vars = get_state()\n verify_tf_loop_vars(init_vars, loop_vars, new_loop_vars, symbol_names, opts)\n return new_loop_vars\n set_state(iter_.reduce(init_vars, reduce_body))", + "docstring": "Overload of for_stmt that iterates over TF distributed datasets.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py", + "ast_data": "FunctionDef name:_tf_distributed_iterable_for_stmt arg:iter_ arg:extra_test arg:body arg:get_state arg:set_state arg:symbol_names arg:opts arguments arg arg arg arg arg arg arg If Compare Raise Call Assign Call Call If Compare Assign Call FunctionDef name:reduce_body arg:loop_vars arg:iterate arguments arg arg Call Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "pygame", + "name": "get_arraytype", + "source_code": "def get_arraytype():\n warnings.warn(DeprecationWarning('only numpy arrays are now supported, this function will be removed in a future version of the module'))\n return 'numpy'", + "docstring": "pygame.sndarray.get_arraytype(): return str DEPRECATED - only numpy arrays are now supported.", + "type": "function", + "file_path": "pygame\\src_py\\sndarray.py", + "ast_data": "FunctionDef name:get_arraytype arguments Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "median", + "source_code": "@abstractmethod\ndef median(self, *, method):\n raise NotImplementedError()", + "docstring": "Median (50th percentile) If a continuous random variable :math: has probability :math: of taking on a value less than :math:, then :math: is the median. More generally, a median is a value :math: for which: .. math:: P(X ≤ m) ≥ 0.5 ≤ P(X ≥ m) For discrete random variables, the median may not be unique, in which case the smallest value satisfying the definition is reported. Parameters ---------- method : {None, 'formula', 'icdf'} The strategy used to evaluate the median. By default (`method=None`), the best available strategy is selected automatically; if the selected `method` is not available for the distribution, a `NotImplementedError` will be raised. Returns ------- out : array The median See Also -------- mean mode icdf References ---------- .. [1] Median, *Wikipedia*, Examples -------- Instantiate a distribution with the desired parameters: >>> from scipy import stats >>> X = stats.Uniform(a=0., b=10.) 
Compute the median: >>> X.median() np.float64(5.0) >>> X.median() == X.icdf(0.5) == X.iccdf(0.5) True", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_probability_distribution.py", + "ast_data": "FunctionDef name:median arg:self arguments arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_is_known_loaded_type", + "source_code": "def _is_known_loaded_type(f, module_name, entity_name):\n if module_name not in sys.modules or not hasattr(sys.modules[module_name], entity_name):\n return False\n type_entity = getattr(sys.modules[module_name], entity_name)\n if isinstance(f, type_entity):\n return True\n if inspect.ismethod(f):\n if isinstance(f.__func__, type_entity):\n return True\n return False", + "docstring": "Tests whether the function or method is an instance of a known type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\conversion.py", + "ast_data": "FunctionDef name:_is_known_loaded_type arg:f arg:module_name arg:entity_name arguments arg arg arg If BoolOp Compare Call Return return:yes Assign Call If Call Return return:yes If Call If Call Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "MatWriteError", + "source_code": "class MatWriteError(Exception):\n pass", + "docstring": "Exception indicating a write issue.", + "type": "class", + "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py", + "ast_data": "ClassDef name:MatWriteError" + }, + { + "library": "pytorch", + "name": "_delay", + "source_code": "def _delay(seconds: Union[float, tuple[float, float]]) -> None:\n if isinstance(seconds, tuple):\n seconds = random.uniform(*seconds)\n if seconds >= 0.01:\n time.sleep(seconds)", + "docstring": "Suspend the current thread for ``. Args: seconds: Either the delay, in seconds, or a tuple of a lower and an upper bound within which a random delay will be picked.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\utils.py", + "ast_data": "FunctionDef name:_delay arg:seconds arguments arg If Call Assign Call If Compare Call" + }, + { + "library": "pytorch", + "name": "_is_trainable", + "source_code": "def _is_trainable(param: torch.Tensor) -> bool:\n return param.requires_grad", + "docstring": "Return if a parameter is trainable, where trainability is equivalent to requiring a gradient.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py", + "ast_data": "FunctionDef name:_is_trainable arg:param arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_tick_padding", + "source_code": "def get_tick_padding(self):\n padding = {'in': 0.0, 'inout': 0.5, 'out': 1.0}\n return self._size * padding[self._tickdir]", + "docstring": "Get the length of the tick outside of the Axes.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_tick_padding arg:self arguments arg Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "get_lr", + "source_code": "@override\ndef get_lr(self) -> list[float]:\n _warn_get_lr_called_within_step(self)\n if self.last_epoch == 0:\n return [group['lr'] * self.start_factor for group in self.optimizer.param_groups]\n if self._is_initial or self.last_epoch > self.total_iters:\n return [group['lr'] for group in self.optimizer.param_groups]\n return [group['lr'] * (1.0 + (self.end_factor - self.start_factor) / (self.total_iters * self.start_factor + (self.last_epoch - 1) * (self.end_factor - 
self.start_factor))) for group in self.optimizer.param_groups]", + "docstring": "Compute the learning rate.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "FunctionDef name:get_lr arg:self arguments arg Call If Compare Return return:yes If BoolOp Compare Return return:yes Return return:yes" + }, + { + "library": "authlib", + "name": "validate_token_endpoint_auth_method", + "source_code": "def validate_token_endpoint_auth_method(self):\n if 'token_endpoint_auth_method' not in self:\n self['token_endpoint_auth_method'] = 'client_secret_basic'\n self._validate_claim_value('token_endpoint_auth_method')", + "docstring": "String indicator of the requested authentication method for the token endpoint.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py", + "ast_data": "FunctionDef name:validate_token_endpoint_auth_method arg:self arguments arg If Compare Assign Call" + }, + { + "library": "pandas", + "name": "Buffer", + "source_code": "class Buffer(ABC):\n\n @property\n @abstractmethod\n def bufsize(self) -> int:\n pass\n\n @property\n @abstractmethod\n def ptr(self) -> int:\n pass\n\n @abstractmethod\n def __dlpack__(self):\n raise NotImplementedError('__dlpack__')\n\n @abstractmethod\n def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:\n pass", + "docstring": "Data in the buffer is guaranteed to be contiguous in memory. Note that there is no dtype attribute present, a buffer can be thought of as simply a block of memory. However, if the column that the buffer is attached to has a dtype that's supported by DLPack and ``. This distinction is useful to support both data exchange via DLPack on a buffer and (b) dtypes like variable-length strings which do not have a fixed number of bytes per element.", + "type": "class", + "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py", + "ast_data": "ClassDef name:Buffer FunctionDef name:bufsize arg:self arguments arg FunctionDef name:ptr arg:self arguments arg FunctionDef name:__dlpack__ arg:self arguments arg Raise Call FunctionDef name:__dlpack_device__ arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "set_device", + "source_code": "def set_device(device: _device_t) -> None:\n _lazy_init()\n device = _get_device_index(device)\n if device >= 0:\n torch._C._xpu_setDevice(device)", + "docstring": "Set the current device. Args: device (torch.device or int or str): selected device. This function is a no-op if this argument is negative.", + "type": "function", + "file_path": "pytorch\\torch\\xpu\\__init__.py", + "ast_data": "FunctionDef name:set_device arg:device arguments arg Call Assign Call If Compare Call" + }, + { + "library": "matplotlib", + "name": "set_norm", + "source_code": "def set_norm(self, norm):\n self.norm = norm", + "docstring": "Set the normalization instance. 
Parameters ---------- norm : or str or None Notes ----- If there are any colorbars using the mappable for this norm, setting the norm of the mappable will reset the norm, locator, and formatters on the colorbar to default.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py", + "ast_data": "FunctionDef name:set_norm arg:self arg:norm arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "_minimum_control_deps", + "source_code": "def _minimum_control_deps(outputs):\n if context.executing_eagerly():\n return []\n outputs = nest.flatten(outputs, expand_composites=True)\n for out in outputs:\n if not isinstance(out, variables.Variable):\n return [out]\n return []", + "docstring": "Returns the minimum control dependencies to ensure step succeeded.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:_minimum_control_deps arg:outputs arguments arg If Call Return return:no Assign Call For If Call Return return:yes Return return:no" + }, + { + "library": "pytorch", + "name": "propagate_inst_exn_table_entries", + "source_code": "def propagate_inst_exn_table_entries(instructions: list[Instruction]) -> None:\n indexof = get_indexof(instructions)\n entries: dict[tuple[int, int], InstructionExnTabEntry] = {}\n for inst in instructions:\n if inst.exn_tab_entry:\n key = (indexof[inst.exn_tab_entry.start], indexof[inst.exn_tab_entry.end])\n if key in entries:\n assert inst.exn_tab_entry == entries[key]\n entries[key] = inst.exn_tab_entry\n sorted_entries = [entries[key] for key in sorted(entries.keys(), key=lambda t: (t[0], -t[1]))]\n check_inst_exn_tab_entries_nested(sorted_entries, indexof)\n for entry in sorted_entries:\n for i in range(indexof[entry.start], indexof[entry.end] + 1):\n instructions[i].exn_tab_entry = copy.copy(entry)", + "docstring": "Copies exception table entries to all instructions in an entry's range. Supports nested exception table entries.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "FunctionDef name:propagate_inst_exn_table_entries arg:instructions arguments arg Assign Call For If Assign If Compare Compare Assign Assign Call Call arguments arg Call For For Call Assign Call" + }, + { + "library": "matplotlib", + "name": "set_filterrad", + "source_code": "def set_filterrad(self, filterrad):\n r = float(filterrad)\n if r <= 0:\n raise ValueError('The filter radius must be a positive number')\n self._filterrad = r\n self.stale = True", + "docstring": "Set the resize filter radius (only applicable to some interpolation schemes). See help for . 
Parameters ---------- filterrad : positive float", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:set_filterrad arg:self arg:filterrad arguments arg arg Assign Call If Compare Raise Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "_TridiagonalSolveGrad", + "source_code": "@ops.RegisterGradient('TridiagonalSolve')\ndef _TridiagonalSolveGrad(op: ops.Operation, grad):\n diags = op.inputs[0]\n x = op.outputs[0]\n partial_pivoting = op.get_attr('partial_pivoting')\n perturb_singular = op.get_attr('perturb_singular')\n diags_transposed = _TransposeTridiagonalMatrix(diags)\n grad_rhs = linalg_ops.tridiagonal_solve(diags_transposed, grad, partial_pivoting=partial_pivoting, perturb_singular=perturb_singular)\n grad_diags = -_MatmulExtractingThreeDiagonals(grad_rhs, x)\n return (grad_diags, grad_rhs)", + "docstring": "Gradient for TridiagonalSolveGrad.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py", + "ast_data": "FunctionDef name:_TridiagonalSolveGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "dtype", + "source_code": "@property\ndef dtype(self) -> np.dtype[np.timedelta64]:\n return self._ndarray.dtype", + "docstring": "The dtype for the TimedeltaArray. .. warning:: A future version of pandas will change dtype to be an instance of a :class: subclass, not a ``. Returns ------- numpy.dtype", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\timedeltas.py", + "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "right_inverse", + "source_code": "def right_inverse(self, value: Tensor) -> None:\n with torch.no_grad():\n for module in reversed(self):\n if hasattr(module, 'right_inverse'):\n value = module.right_inverse(value)\n else:\n raise RuntimeError(f'parametrization {type(module).__name__} does not implement right_inverse.')\n if self.is_tensor:\n if not isinstance(value, Tensor):\n raise ValueError(f'`right_inverse` should return a tensor. Got {type(value).__name__}')\n if value.dtype != self.original.dtype:\n raise ValueError(f'The tensor returned by `right_inverse` has dtype {value.dtype} while `original` has dtype {self.original.dtype}')\n _maybe_set(self.original, value)\n else:\n if not isinstance(value, collections.abc.Sequence):\n raise ValueError(f\"'right_inverse' must return a sequence of tensors. Got {type(value).__name__}.\")\n if len(value) != self.ntensors:\n raise ValueError(f\"'right_inverse' must return a sequence of tensors of length {self.ntensors}. Got a sequence of length {len(value)}.\")\n for i, tensor in enumerate(value):\n original_i = getattr(self, f'original{i}')\n if not isinstance(tensor, Tensor):\n raise ValueError(f'`right_inverse` must return a sequence of tensors. Got element {i} of type {type(tensor).__name__}')\n if original_i.dtype != tensor.dtype:\n raise ValueError(f'Tensor {i} returned by `right_inverse` has dtype {tensor.dtype} while `original{i}` has dtype {original_i.dtype}')\n _maybe_set(original_i, tensor)", + "docstring": "Call the ``, ... if it outputs several. 
Args: value (Tensor): Value to which initialize the module", + "type": "method", + "file_path": "pytorch\\torch\\nn\\utils\\parametrize.py", + "ast_data": "FunctionDef name:right_inverse arg:self arg:value arguments arg arg With Call For Call If Call Assign Call Raise Call Call If If Call Raise Call Call If Compare Raise Call Call If Call Raise Call Call If Compare Call Raise Call Call For Call Assign Call If Call Raise Call Call If Compare Raise Call Call" + }, + { + "library": "tensorflow", + "name": "scatter_nd_sub", + "source_code": "def scatter_nd_sub(self, indices, updates, name=None):\n return self._lazy_read(gen_state_ops.resource_scatter_nd_sub(self.handle, indices, ops.convert_to_tensor(updates, self.dtype), name=name))", + "docstring": "Applies sparse subtraction to individual values or slices in a Variable. is a with rank and is a of rank . must be integer tensor, containing indices into . It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of . is of rank with shape: For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: The resulting update to ref would look like this: [1, -9, 3, -6, -6, 6, 7, -4] See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: The updated variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:scatter_nd_sub arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "cryptography", + "name": "block_size", + "source_code": "@property\n@abc.abstractmethod\ndef block_size(self) -> int | None:\n pass", + "docstring": "The internal block size of the hash function, or None if the hash function does not use blocks internally (e.g. SHA3).", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\hashes.py", + "ast_data": "FunctionDef name:block_size arg:self arguments arg" + }, + { + "library": "scipy", + "name": "log_pdet", + "source_code": "@property\ndef log_pdet(self):\n return np.array(self._log_pdet, dtype=float)[()]", + "docstring": "Log of the pseudo-determinant of the covariance matrix", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_covariance.py", + "ast_data": "FunctionDef name:log_pdet arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "__repr__", + "source_code": "def __repr__(self) -> str:\n repr_params = fmt.get_series_repr_params()\n return self.to_string(**repr_params)", + "docstring": "Return a string representation for a particular Series.", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "__init__", + "source_code": "def __init__(self, value, output_field=None):\n super().__init__(output_field=output_field)\n self.value = value", + "docstring": "Arguments: * value: the value this expression represents. The value will be added into the sql parameter list and properly quoted. 
* output_field: an instance of the model field type that this expression will return, such as IntegerField() or CharField().", + "type": "method", + "file_path": "django\\django\\db\\models\\expressions.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:value arg:output_field arguments arg arg arg Call Call Assign" + }, + { + "library": "tensorflow", + "name": "Dropout", + "source_code": "class Dropout(keras_layers.Dropout, base.Layer):\n\n def __init__(self, rate=0.5, noise_shape=None, seed=None, name=None, **kwargs):\n super(Dropout, self).__init__(rate=rate, noise_shape=noise_shape, seed=seed, name=name, **kwargs)\n\n def call(self, inputs, training=False):\n return super(Dropout, self).call(inputs, training=training)", + "docstring": "Applies Dropout to the input. Dropout consists in randomly setting a fraction of input units to 0 at each update during training time, which helps prevent overfitting. The units that are kept are scaled by , so that their sum is unchanged at training time and inference time. Args: rate: The dropout rate, between 0 and 1. E.g. would drop out 10% of input units. noise_shape: 1D tensor of type representing the shape of the binary dropout mask that will be multiplied with the input. For instance, if your inputs have shape , and you want the dropout mask to be the same for all timesteps, you can use . seed: A Python integer. Used to create random seeds. See . for behavior. name: The name of the layer (string).", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\core.py", + "ast_data": "ClassDef name:Dropout FunctionDef name:__init__ arg:self arg:rate arg:noise_shape arg:seed arg:name arguments arg arg arg arg arg arg Call Call FunctionDef name:call arg:self arg:inputs arg:training arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "restore_saveables", + "source_code": "def restore_saveables(self, tensor_saveables, python_positions, registered_savers=None, reader=None):\n if reader is None:\n reader = py_checkpoint_reader.NewCheckpointReader(self.save_path_string)\n restore_ops = []\n for position in python_positions:\n key = position.object_proto.attributes[0].checkpoint_key\n position.trackable.deserialize(reader.get_tensor(key))\n if tensor_saveables or registered_savers:\n flat_saveables = saveable_object_util.validate_and_slice_inputs(tensor_saveables)\n new_restore_ops = functional_saver.MultiDeviceSaver.from_saveables(flat_saveables, registered_savers).restore(self.save_path_tensor, self.options)\n if not context.executing_eagerly():\n for name, restore_op in sorted(new_restore_ops.items()):\n restore_ops.append(restore_op)\n assert name not in self.restore_ops_by_name\n self.restore_ops_by_name[name] = restore_op\n return restore_ops", + "docstring": "Run or build restore operations for SaveableObjects. Args: tensor_saveables: s which correspond to Tensors. python_positions: List of CheckpointPositions bound to objects which must be restored eagerly. registered_savers: a dict mapping saver names-> object name -> Trackable. reader: A . If None, a new instance will be created. 
Returns: When graph building, a list of restore operations, either cached or newly created, to restore .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:restore_saveables arg:self arg:tensor_saveables arg:python_positions arg:registered_savers arg:reader arguments arg arg arg arg arg If Compare Assign Call Assign For Assign Call Call If BoolOp Assign Call Assign Call Call If Call For Call Call Call Compare Assign Return return:yes" + }, + { + "library": "django", + "name": "clean", + "source_code": "def clean(self, value, model_instance):\n value = self.to_python(value)\n self.validate(value, model_instance)\n self.run_validators(value)\n return value", + "docstring": "Convert the value's type and run validation. Validation errors from to_python() and validate() are propagated. Return the correct value if no error is raised.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "FunctionDef name:clean arg:self arg:value arg:model_instance arguments arg arg arg Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_constant_fold", + "source_code": "def _constant_fold(self, fn: Callable[[list[int]], int], seq: list[Union[int, str]]) -> list[Union[int, str]]:\n items: list[Union[int, str]] = [x for x in seq if not isinstance(x, int)]\n const_items = [x for x in seq if isinstance(x, int)]\n if const_items:\n items.append(fn(const_items))\n return items", + "docstring": "Constant fold through a commutative fn where ints are constants", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py", + "ast_data": "FunctionDef name:_constant_fold arg:self arg:fn arg:seq arguments arg arg arg Call Assign Call If Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "WORMTable", + "source_code": "class WORMTable(Table):\n table_type = 'worm'\n\n def read(self, where=None, columns=None, start: int | None=None, stop: int | None=None):\n raise NotImplementedError('WORMTable needs to implement read')\n\n def write(self, obj, **kwargs) -> None:\n raise NotImplementedError('WORMTable needs to implement write')", + "docstring": "a write-once read-many table: this format DOES NOT ALLOW appending to a table. writing is a one-time operation the data are stored in a format that allows for searching the data on disk", + "type": "class", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "ClassDef name:WORMTable Assign FunctionDef name:read arg:self arg:where arg:columns arg:start arg:stop arguments arg arg arg arg arg Raise Call FunctionDef name:write arg:self arg:obj arguments arg arg arg Raise Call" + }, + { + "library": "matplotlib", + "name": "adjusted_figsize", + "source_code": "def adjusted_figsize(w, h, dpi, n):\n\n def correct_roundoff(x, dpi, n):\n if int(x * dpi) % n != 0:\n if int(np.nextafter(x, np.inf) * dpi) % n == 0:\n x = np.nextafter(x, np.inf)\n elif int(np.nextafter(x, -np.inf) * dpi) % n == 0:\n x = np.nextafter(x, -np.inf)\n return x\n wnew = int(w * dpi / n) * n / dpi\n hnew = int(h * dpi / n) * n / dpi\n return (correct_roundoff(wnew, dpi, n), correct_roundoff(hnew, dpi, n))", + "docstring": "Compute figure size so that pixels are a multiple of n. Parameters ---------- w, h : float Size in inches. dpi : float The dpi. n : int The target multiple. 
Returns ------- wnew, hnew : float The new figure size in inches.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\animation.py", + "ast_data": "FunctionDef name:adjusted_figsize arg:w arg:h arg:dpi arg:n arguments arg arg arg arg FunctionDef name:correct_roundoff arg:x arg:dpi arg:n arguments arg arg arg If Compare Call If Compare Call Call Assign Call If Compare Call Call Assign Call Return return:yes Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "count", + "source_code": "def count(self):\n if self._result_cache is not None:\n return len(self._result_cache)\n return self.query.get_count(using=self.db)", + "docstring": "Perform a SELECT COUNT() and return the number of records as an integer. If the QuerySet is already fully cached, return the length of the cached results set to avoid multiple SELECT COUNT(*) calls.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:count arg:self arguments arg If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "seaborn", + "name": "remove_na", + "source_code": "def remove_na(vector):\n return vector[pd.notnull(vector)]", + "docstring": "Helper method for removing null values from data vectors. Parameters ---------- vector : vector object Must implement boolean masking with [] subscript syntax. Returns ------- clean_clean : same type as `` Vector of data with null values removed. May be a copy or a view.", + "type": "function", + "file_path": "seaborn\\seaborn\\utils.py", + "ast_data": "FunctionDef name:remove_na arg:vector arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "TFLiteSavedModelConverter", + "source_code": "class TFLiteSavedModelConverter(TFLiteConverterBaseV1):\n\n def __init__(self, saved_model_dir, saved_model_tags, saved_model_exported_names, experimental_debug_info_func=None):\n super(TFLiteSavedModelConverter, self).__init__(experimental_debug_info_func)\n self.saved_model_dir = saved_model_dir\n self._saved_model_tags = saved_model_tags\n self._saved_model_exported_names = saved_model_exported_names\n if len(self._saved_model_exported_names) != 1:\n raise ValueError('Only supports a single signature key.')\n signature_key = self._saved_model_exported_names[0]\n result = _freeze_saved_model(self.saved_model_dir, None, None, None, self._saved_model_tags, signature_key)\n self._graph_def = result[0]\n self._input_tensors = result[1]\n self._output_tensors = result[2]\n self._parse_saved_model_args()\n\n @_export_metrics\n def convert(self):\n return super(TFLiteSavedModelConverter, self).convert()", + "docstring": "Converts the given SavedModel into TensorFlow Lite model. Attributes: saved_model_dir: Directory of the SavedModel.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "ClassDef name:TFLiteSavedModelConverter FunctionDef name:__init__ arg:self arg:saved_model_dir arg:saved_model_tags arg:saved_model_exported_names arg:experimental_debug_info_func arguments arg arg arg arg arg Call Call Assign Assign Assign If Compare Call Raise Call Assign Assign Call Assign Assign Assign Call FunctionDef name:convert arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_eval", + "source_code": "def _eval(self, tensor):\n raise errors.UnimplementedError('The evaluation method should be implemented in sub-classes.')", + "docstring": "Returns the value in the tensor. 
Must be implemented in sub-classes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", + "ast_data": "FunctionDef name:_eval arg:self arg:tensor arguments arg arg Raise Call" + }, + { + "library": "scipy", + "name": "time_count_neighbors_shallow", + "source_code": "def time_count_neighbors_shallow(self, mn1n2, Nr):\n self.T1s.count_neighbors(self.T2s, self.r)", + "docstring": "Count neighbors for a shallow kd-tree dim | # points T1 | # points T2 | Nr", + "type": "method", + "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py", + "ast_data": "FunctionDef name:time_count_neighbors_shallow arg:self arg:mn1n2 arg:Nr arguments arg arg arg Call" + }, + { + "library": "scikit-learn", + "name": "inverse_transform", + "source_code": "def inverse_transform(self, yt):\n check_is_fitted(self)\n if yt.shape[1] != len(self.classes_):\n raise ValueError('Expected indicator for {0} classes, but got {1}'.format(len(self.classes_), yt.shape[1]))\n if sp.issparse(yt):\n yt = yt.tocsr()\n if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:\n raise ValueError('Expected only 0s and 1s in label indicator.')\n return [tuple(self.classes_.take(yt.indices[start:end])) for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]\n else:\n unexpected = np.setdiff1d(yt, [0, 1])\n if len(unexpected) > 0:\n raise ValueError('Expected only 0s and 1s in label indicator. Also got {0}'.format(unexpected))\n return [tuple(self.classes_.compress(indicators)) for indicators in yt]", + "docstring": "Transform the given indicator matrix into label sets. Parameters ---------- yt : {ndarray, sparse matrix} of shape (n_samples, n_classes) A matrix containing only 1s ands 0s. Returns ------- y_original : list of tuples The set of labels for each sample such that consists of for each .", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py", + "ast_data": "FunctionDef name:inverse_transform arg:self arg:yt arguments arg arg Call If Compare Call Raise Call Call Call If Call Assign Call If BoolOp Compare Call Compare Call Call Raise Call Return return:yes Call Call Call Assign Call If Compare Call Raise Call Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_format_argument_list", + "source_code": "def _format_argument_list(allow_args: list[str]) -> str:\n if 'self' in allow_args:\n allow_args.remove('self')\n if not allow_args:\n return ''\n elif len(allow_args) == 1:\n return f\" except for the argument '{allow_args[0]}'\"\n else:\n last = allow_args[-1]\n args = ', '.join([\"'\" + x + \"'\" for x in allow_args[:-1]])\n return f\" except for the arguments {args} and '{last}'\"", + "docstring": "Convert the allow_args argument (either string or integer) of function to a string describing it to be inserted into warning message. Parameters ---------- allowed_args : list, tuple or int The argument for , but None value is not allowed. Returns ------- str The substring describing the argument list in best way to be inserted to the warning message. 
Examples -------- -> '' -> \"except for the arguments 'a'\" -> \"except for the arguments 'a' and 'b'\" -> \"except for the arguments 'a', 'b' and 'c'\"", + "type": "function", + "file_path": "pandas\\pandas\\util\\_decorators.py", + "ast_data": "FunctionDef name:_format_argument_list arg:allow_args arguments arg If Compare Call If Return return:yes If Compare Call Return return:yes Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_is_all_finite", + "source_code": "def _is_all_finite(grads):\n\n def raw_values(g):\n return g.values if isinstance(g, indexed_slices.IndexedSlices) else g\n is_finite_per_grad = [math_ops.reduce_all(math_ops.is_finite(raw_values(g))) for g in grads if g is not None]\n return math_ops.reduce_all(is_finite_per_grad)", + "docstring": "Returns a scalar boolean tensor indicating if all gradients are finite.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py", + "ast_data": "FunctionDef name:_is_all_finite arg:grads arguments arg FunctionDef name:raw_values arg:g arguments arg Return return:yes Call Assign Call Call Call Compare Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "direct_repr", + "source_code": "class direct_repr:\n\n def __init__(self, value):\n self._repr = value\n\n def __repr__(self):\n return self._repr", + "docstring": "A placeholder class to destringify annotations from ast", + "type": "class", + "file_path": "matplotlib\\tools\\boilerplate.py", + "ast_data": "ClassDef name:direct_repr FunctionDef name:__init__ arg:self arg:value arguments arg arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "pygame", + "name": "get_count", + "source_code": "def get_count():\n _check_init()\n return _pypm.CountDevices()", + "docstring": "gets the number of devices. 
pygame.midi.get_count(): return num_devices Device ids range from 0 to get_count() -1", + "type": "function", + "file_path": "pygame\\src_py\\midi.py", + "ast_data": "FunctionDef name:get_count arguments Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "__truediv__", + "source_code": "def __truediv__(self, other):\n if self._delegate_binop(other):\n return NotImplemented\n return true_divide(self, other)", + "docstring": "Divide other into self, and return a new masked array.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__truediv__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Call" + }, + { + "library": "sphinx", + "name": "SphinxDummySourceClass", + "source_code": "def SphinxDummySourceClass(source: Any, *args: Any, **kwargs: Any) -> Any:\n return source", + "docstring": "Bypass source object as is to cheat Publisher.", + "type": "function", + "file_path": "sphinx\\sphinx\\io.py", + "ast_data": "FunctionDef name:SphinxDummySourceClass arg:source arguments arg arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "__call__", + "source_code": "def __call__(self, fn: Callable[..., Any]) -> '_DependentProperty':\n return _DependentProperty(fn, is_discrete=self._is_discrete, event_dim=self._event_dim)", + "docstring": "Support for syntax to customize static attributes:: @constraints.dependent_property(is_discrete=True, event_dim=1) def support(self): ...", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\constraints.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:fn arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "random_uniform", + "source_code": "def random_uniform(self, shape, minval, maxval, dtype):\n if self.seed:\n op = stateless_random_ops.stateless_random_uniform\n else:\n op = random_ops.random_uniform\n return op(shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)", + "docstring": "A deterministic random uniform if seed is passed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py", + "ast_data": "FunctionDef name:random_uniform arg:self arg:shape arg:minval arg:maxval arg:dtype arguments arg arg arg arg arg If Assign Assign Return return:yes Call" + }, + { + "library": "scrapy", + "name": "run", + "source_code": "def run(self, args: list[str], opts: argparse.Namespace) -> None:\n raise NotImplementedError", + "docstring": "Entry point for running commands", + "type": "method", + "file_path": "scrapy\\scrapy\\commands\\__init__.py", + "ast_data": "FunctionDef name:run arg:self arg:args arg:opts arguments arg arg arg Raise" + }, + { + "library": "scikit-learn", + "name": "score", + "source_code": "def score(self, X, y=None):\n return np.mean(self.score_samples(X))", + "docstring": "Compute the average log-likelihood of the samples. Parameters ---------- X : ndarray of shape (n_samples, n_features) The data. y : Ignored Ignored parameter. 
Returns ------- ll : float Average log-likelihood of the samples under the current model.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_factor_analysis.py", + "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, experimental_debug_info_func):\n super(TFLiteConverterBaseV1, self).__init__()\n self.inference_type = _dtypes.float32\n self.inference_input_type = None\n self.inference_output_type = None\n self.output_format = constants.TFLITE\n self.quantized_input_stats = {}\n self.default_ranges_stats = None\n self.drop_control_dependency = True\n self.reorder_across_fake_quant = False\n self.change_concat_input_ranges = False\n self.dump_graphviz_dir = None\n self.dump_graphviz_video = False\n self.conversion_summary_dir = None\n self._debug_info_func = experimental_debug_info_func\n self._metadata.environment.apiVersion = 1", + "docstring": "Constructor for TFLiteConverter. Args: experimental_debug_info_func: An experimental function to retrieve the graph debug info for a set of nodes from the .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:experimental_debug_info_func arguments arg arg Call Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "_as_node_def_input", + "source_code": "def _as_node_def_input(self):\n assert self._op.name\n if self.value_index == 0:\n return self._op.name\n else:\n return '%s:%d' % (self._op.name, self.value_index)", + "docstring": "Return a value to use for the NodeDef \"input\" attribute. The returned string can be used in a NodeDef \"input\" attribute to indicate that the NodeDef uses this Tensor as input. Raises: ValueError: if this Tensor's Operation does not have a name. 
Returns: a string.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", + "ast_data": "FunctionDef name:_as_node_def_input arg:self arguments arg If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_tf_tensor_list_pop", + "source_code": "def _tf_tensor_list_pop(list_, i, opts):\n if i is not None:\n raise NotImplementedError('tensor lists only support removing from the end')\n if opts.element_dtype is None:\n raise ValueError('cannot pop from a list without knowing its element type; use set_element_type to annotate it')\n if opts.element_shape is None:\n raise ValueError('cannot pop from a list without knowing its element shape; use set_element_type to annotate it')\n list_out, x = list_ops.tensor_list_pop_back(list_, element_dtype=opts.element_dtype)\n x.set_shape(opts.element_shape)\n return (list_out, x)", + "docstring": "Overload of list_pop that stages a Tensor list pop.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py", + "ast_data": "FunctionDef name:_tf_tensor_list_pop arg:list_ arg:i arg:opts arguments arg arg arg If Compare Raise Call If Compare Raise Call If Compare Raise Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_multi_worker_concat", + "source_code": "def _multi_worker_concat(v, strategy):\n replicas = strategy.gather(v, axis=0)\n if _is_per_replica_instance(v):\n shapes = array_ops.concat([array_ops.expand_dims_v2(array_ops.shape(single_value)[0], axis=0) for single_value in v.values], axis=0)\n all_shapes = strategy.gather(shapes, axis=0)\n else:\n all_shapes = strategy.gather(array_ops.expand_dims_v2(array_ops.shape(v)[0], axis=0), axis=0)\n replicas = array_ops.split(replicas, num_or_size_splits=all_shapes, num=strategy.num_replicas_in_sync)\n ordered_replicas = []\n num_replicas_per_worker = len(strategy.extended.worker_devices)\n for replica_id in range(num_replicas_per_worker):\n ordered_replicas += replicas[replica_id::num_replicas_per_worker]\n return concat(ordered_replicas)", + "docstring": "Order PerReplica objects for CollectiveAllReduceStrategy and concat.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:_multi_worker_concat arg:v arg:strategy arguments arg arg Assign Call If Call Assign Call Call Call Assign Call Assign Call Call Call Assign Call Assign Assign Call For Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_good_shape", + "source_code": "def _good_shape(x, shape, axes):\n if shape is not None and axes is None:\n shape = _helper._iterable_of_int(shape, 'shape')\n if len(shape) != np.ndim(x):\n raise ValueError('when given, axes and shape arguments have to be of the same length')\n return shape", + "docstring": "Ensure that shape argument is valid for scipy.fftpack scipy.fftpack does not support len(shape) < x.ndim when axes is not given.", + "type": "function", + "file_path": "scipy\\scipy\\fftpack\\_helper.py", + "ast_data": "FunctionDef name:_good_shape arg:x arg:shape arg:axes arguments arg arg arg If BoolOp Compare Compare Assign Call If Compare Call Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "SingleDevice", + "source_code": "class SingleDevice(object):\n\n def __init__(self, device):\n self.device = device", + "docstring": "Used with to create a non-mirrored variable.", + "type": "class", + "file_path": 
"tensorflow\\tensorflow\\python\\distribute\\numpy_dataset.py", + "ast_data": "ClassDef name:SingleDevice FunctionDef name:__init__ arg:self arg:device arguments arg arg Assign" + }, + { + "library": "cherrypy", + "name": "parse_header", + "source_code": "def parse_header(line):\n parts = _parse_param(';' + line)\n key = parts.__next__()\n pdict = {}\n for p in parts:\n i = p.find('=')\n if i >= 0:\n name = p[:i].strip().lower()\n value = p[i + 1:].strip()\n if len(value) >= 2 and value[0] == value[-1] == '\"':\n value = value[1:-1]\n value = value.replace('\\\\\\\\', '\\\\').replace('\\\\\"', '\"')\n pdict[name] = value\n return (key, pdict)", + "docstring": "Parse a `cgicherrypy/cherrypy#2014 (comment) `_ for background.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\_private_api\\compat\\headers.py", + "ast_data": "FunctionDef name:parse_header arg:line arguments arg Assign Call Assign Call Assign For Assign Call If Compare Assign Call Call Assign Call If BoolOp Compare Call Compare Assign Assign Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_flat_tensor_specs", + "source_code": "@property\ndef _flat_tensor_specs(self) -> List[TypeSpec]:\n component_flat_tensor_specs = nest.map_structure(functools.partial(get_batchable_flat_tensor_specs, context_spec=self), self._component_specs)\n return nest.flatten(component_flat_tensor_specs)", + "docstring": "A list of TensorSpecs compatible with self._to_tensor_list(v).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", + "ast_data": "FunctionDef name:_flat_tensor_specs arg:self arguments arg Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "gen_mkl_autotuner", + "source_code": "def gen_mkl_autotuner(example_inputs, iters=10, warmup=1):\n fx_model = None\n old_modules = None\n\n def use_mkl_heuristic(graph: MklSubgraph) -> bool:\n nonlocal fx_model, old_modules\n input_nodes = graph.start_nodes\n if fx_model is None:\n fx_model = graph.fx_graph.owning_module\n old_modules = graph.fx_graph.old_modules\n ShapeProp(fx_model).propagate(example_inputs)\n sample_inputs = [torch.randn(node.shape) for node in input_nodes]\n output_args = cast(list[fx.Node], [node.args[0] for node in graph.end_nodes])\n submodule = extract_subgraph(fx_model, graph.nodes, input_nodes, output_args)\n\n def benchmark(f):\n for _ in range(warmup):\n f()\n begin = time.time()\n for _ in range(iters):\n f()\n return time.time() - begin\n mkl_time = benchmark(lambda: [i.to_dense() for i in submodule(*[i.to_mkldnn() for i in sample_inputs])])\n reset_modules(submodule.graph.nodes, dict(submodule.named_modules()), old_modules)\n no_mkl_time = benchmark(lambda: submodule(*sample_inputs))\n return mkl_time < no_mkl_time\n return use_mkl_heuristic", + "docstring": "This generates a heuristic that can be passed into that determines whether a subgraph should be run in MKL by running it with the example_inputs. 
Example usage: heuristic = gen_mkl_autotuner(example_inputs, iters=10) fast_model = optimization.optimize_for_inference(model, heuristic)", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\optimization.py", + "ast_data": "FunctionDef name:gen_mkl_autotuner arg:example_inputs arg:iters arg:warmup arguments arg arg arg Assign Assign FunctionDef name:use_mkl_heuristic arg:graph arguments arg Assign If Compare Assign Assign Call Call Assign Call Assign Call Assign Call FunctionDef name:benchmark arg:f arguments arg For Call Call Assign Call For Call Call Return return:yes Call Assign Call arguments Call Call Call Call Call Call Assign Call arguments Call Return return:yes Compare Return return:yes" + }, + { + "library": "cherrypy", + "name": "process", + "source_code": "def process(self):\n h = cherrypy.serving.request.headers\n if 'Content-Length' not in h and 'Transfer-Encoding' not in h:\n raise cherrypy.HTTPError(411)\n self.fp = SizedReader(self.fp, self.length, self.maxbytes, bufsize=self.bufsize, has_trailers='Trailer' in h)\n super(RequestBody, self).process()\n request_params = self.request_params\n for key, value in self.params.items():\n if key in request_params:\n if not isinstance(request_params[key], list):\n request_params[key] = [request_params[key]]\n request_params[key].append(value)\n else:\n request_params[key] = value", + "docstring": "Process the request entity based on its Content-Type.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpreqbody.py", + "ast_data": "FunctionDef name:process arg:self arguments arg Assign If BoolOp Compare Compare Raise Call Assign Call Compare Call Call Assign For Call If Compare If Call Assign Call Assign" + }, + { + "library": "pygame", + "name": "get_sprites_at", + "source_code": "def get_sprites_at(self, pos):\n _sprites = self._spritelist\n rect = Rect(pos, (1, 1))\n colliding_idx = rect.collidelistall(_sprites)\n return [_sprites[i] for i in colliding_idx]", + "docstring": "return a list with all sprites at that position LayeredUpdates.get_sprites_at(pos): return colliding_sprites Bottom sprites are listed first; the top ones are listed last.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:get_sprites_at arg:self arg:pos arguments arg arg Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_check_row_col", + "source_code": "def _check_row_col(self, row, col):\n for name, tensor in [['row', row], ['col', col]]:\n if tensor.shape.ndims is not None and tensor.shape.ndims < 1:\n raise ValueError('Argument {} must have at least 1 dimension. 
Found: {}'.format(name, tensor))\n if row.shape[-1] is not None and col.shape[-1] is not None:\n if row.shape[-1] != col.shape[-1]:\n raise ValueError('Expected square matrix, got row and col with mismatched dimensions.')", + "docstring": "Static check of row and column.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_toeplitz.py", + "ast_data": "FunctionDef name:_check_row_col arg:self arg:row arg:col arguments arg arg arg For If BoolOp Compare Compare Raise Call Call If BoolOp Compare Compare If Compare Raise Call" + }, + { + "library": "sphinx", + "name": "_get_domain_from_url", + "source_code": "def _get_domain_from_url(url: str) -> str:\n return url and urllib.parse.urlparse(url).hostname or ''", + "docstring": "Get the domain from a URL.", + "type": "function", + "file_path": "sphinx\\sphinx\\ext\\githubpages.py", + "ast_data": "FunctionDef name:_get_domain_from_url arg:url arguments arg Return return:yes BoolOp BoolOp Call" + }, + { + "library": "tensorflow", + "name": "update_state", + "source_code": "def update_state(self, y_true, y_pred, sample_weight=None):\n y_true = math_ops.cast(y_true, self._dtype)\n y_pred = math_ops.cast(y_pred, self._dtype)\n if y_pred.shape.ndims > 1:\n y_pred = array_ops.reshape(y_pred, [-1])\n if y_true.shape.ndims > 1:\n y_true = array_ops.reshape(y_true, [-1])\n if sample_weight is not None:\n sample_weight = math_ops.cast(sample_weight, self._dtype)\n if sample_weight.shape.ndims > 1:\n sample_weight = array_ops.reshape(sample_weight, [-1])\n current_cm = confusion_matrix.confusion_matrix(y_true, y_pred, self.num_classes, weights=sample_weight, dtype=self._dtype)\n return self.total_cm.assign_add(current_cm)", + "docstring": "Accumulates the confusion matrix statistics. Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Defaults to 1. Can be a whose rank is either 0, or the same rank as , and must be broadcastable to . Returns: Update op.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "FunctionDef name:update_state arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Assign Call Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "__mul__", + "source_code": "def __mul__(self, other):\n if not self._check_binop_other(other):\n return NotImplemented\n if isinstance(other, StateSpace):\n if type(other) is not type(self):\n return NotImplemented\n if self.dt != other.dt:\n raise TypeError('Cannot multiply systems with different `dt`.')\n n1 = self.A.shape[0]\n n2 = other.A.shape[0]\n a = np.vstack((np.hstack((self.A, np.dot(self.B, other.C))), np.hstack((zeros((n2, n1)), other.A))))\n b = np.vstack((np.dot(self.B, other.D), other.B))\n c = np.hstack((self.C, np.dot(self.D, other.C)))\n d = np.dot(self.D, other.D)\n else:\n a = self.A\n b = np.dot(self.B, other)\n c = self.C\n d = np.dot(self.D, other)\n common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype)\n return StateSpace(np.asarray(a, dtype=common_dtype), np.asarray(b, dtype=common_dtype), np.asarray(c, dtype=common_dtype), np.asarray(d, dtype=common_dtype), **self._dt_dict)", + "docstring": "Post-multiply another system or a scalar Handles multiplication of systems in the sense of a frequency domain multiplication. 
That means, given two systems E1(s) and E2(s), their multiplication, H(s) = E1(s) * E2(s), means that applying H(s) to U(s) is equivalent to first applying E2(s), and then E1(s). Notes ----- For SISO systems the order of system application does not matter. However, for MIMO systems, where the two systems are matrices, the order above ensures standard Matrix multiplication rules apply.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:__mul__ arg:self arg:other arguments arg arg If Call Return return:yes If Call If Compare Call Call Return return:yes If Compare Raise Call Assign Assign Assign Call Call Call Call Call Assign Call Call Assign Call Call Assign Call Assign Assign Call Assign Assign Call Assign Call Return return:yes Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "get_device_name", + "source_code": "def get_device_name(device: Optional[_device_t]=None) -> str:\n return get_device_properties(device).name", + "docstring": "Get the name of a device. Args: device (torch.device or int or str, optional): device for which to return the name. This function is a no-op if this argument is a negative integer. It uses the current device, given by :func:, if :attr: is `` (default). Returns: str: the name of the device", + "type": "function", + "file_path": "pytorch\\torch\\xpu\\__init__.py", + "ast_data": "FunctionDef name:get_device_name arg:device arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "partial_fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y=None):\n has_components = hasattr(self, 'components_')\n X = validate_data(self, X, dtype=[np.float64, np.float32], order='C', reset=not has_components)\n if not has_components:\n self._check_params(X)\n self._random_state = check_random_state(self.random_state)\n dictionary = self._initialize_dict(X, self._random_state)\n self.n_steps_ = 0\n self._A = np.zeros((self._n_components, self._n_components), dtype=X.dtype)\n self._B = np.zeros((X.shape[1], self._n_components), dtype=X.dtype)\n else:\n dictionary = self.components_\n self._minibatch_step(X, dictionary, self._random_state, self.n_steps_)\n self.components_ = dictionary\n self.n_steps_ += 1\n return self", + "docstring": "Update the model using the data in X as a mini-batch. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Return the instance itself.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py", + "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Call If Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call Assign Return return:yes Call" + }, + { + "library": "numpy", + "name": "take", + "source_code": "@array_function_dispatch(_take_dispatcher)\ndef take(a, indices, axis=None, out=None, mode='raise'):\n return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)", + "docstring": "Take elements from an array along an axis. When axis is not None, this function does the same thing as \"fancy\" indexing (indexing arrays using arrays); however, it can be easier to use if you need elements along a given axis. 
A call such as `ndindexoutmode='raise'as_takeapply_along_axisaindices` is not one dimensional, the output also has these dimensions. >>> np.take(a, [[0, 1], [2, 3]]) array([[4, 3], [5, 7]])", + "type": "function", + "file_path": "numpy\\numpy\\_core\\fromnumeric.py", + "ast_data": "FunctionDef name:take arg:a arg:indices arg:axis arg:out arg:mode arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "scrapy", + "name": "urljoin", + "source_code": "def urljoin(self, url: str) -> str:\n return urljoin(self.url, url)", + "docstring": "Join this Response's url with a possible relative url to form an absolute interpretation of the latter.", + "type": "method", + "file_path": "scrapy\\scrapy\\http\\response\\__init__.py", + "ast_data": "FunctionDef name:urljoin arg:self arg:url arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "isgeneratorfunction", + "source_code": "def isgeneratorfunction(object):\n return _inspect.isgeneratorfunction(tf_decorator.unwrap(object)[1])", + "docstring": "TFDecorator-aware replacement for inspect.isgeneratorfunction.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py", + "ast_data": "FunctionDef name:isgeneratorfunction arg:object arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "apply", + "source_code": "def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Sequence[torch.Tensor | int | float | bool | str]:\n for step in self._steps:\n model_outputs = step.apply(model_outputs, model=model)\n return model_outputs", + "docstring": "Converts the PyTorch model outputs to exported ONNX model outputs format. Args: model_outputs: The PyTorch model outputs. model: The PyTorch model. Returns: PyTorch model outputs in exported ONNX model outputs format.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", + "ast_data": "FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg For Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "Hinge", + "source_code": "class Hinge(MeanMetricWrapper):\n\n def __init__(self, name='hinge', dtype=None):\n super(Hinge, self).__init__(hinge, name, dtype=dtype)", + "docstring": "Computes the hinge metric between and . values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Hinge() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result().numpy() 1.3 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... 
sample_weight=[1, 0]) >>> m.result().numpy() 1.1 Usage with API:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "ClassDef name:Hinge FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call" + }, + { + "library": "seaborn", + "name": "resolve_color", + "source_code": "def resolve_color(mark: Mark, data: DataFrame | dict, prefix: str='', scales: dict[str, Scale] | None=None) -> RGBATuple | ndarray:\n color = mark._resolve(data, f'{prefix}color', scales)\n if f'{prefix}alpha' in mark._mappable_props:\n alpha = mark._resolve(data, f'{prefix}alpha', scales)\n else:\n alpha = mark._resolve(data, 'alpha', scales)\n\n def visible(x, axis=None):\n return np.array(x).dtype.kind != 'f' or np.isfinite(x).all(axis)\n if np.ndim(color) < 2 and all((isinstance(x, float) for x in color)):\n if len(color) == 4:\n return mpl.colors.to_rgba(color)\n alpha = alpha if visible(color) else np.nan\n return mpl.colors.to_rgba(color, alpha)\n else:\n if np.ndim(color) == 2 and color.shape[1] == 4:\n return mpl.colors.to_rgba_array(color)\n alpha = np.where(visible(color, axis=1), alpha, np.nan)\n return mpl.colors.to_rgba_array(color, alpha)", + "docstring": "Obtain a default, specified, or mapped value for a color feature. This method exists separately to support the relationship between a color and its corresponding alpha. We want to respect alpha values that are passed in specified (or mapped) color values but also make use of a separate variable, which can be mapped. This approach may also be extended to support mapping of specific color channels (i.e. luminance, chroma) in the future. Parameters ---------- mark : Mark with the color property. data : Container with data values for features that will be semantically mapped. prefix : Support \"color\", \"fillcolor\", etc.", + "type": "function", + "file_path": "seaborn\\seaborn\\_marks\\base.py", + "ast_data": "FunctionDef name:resolve_color arg:mark arg:data arg:prefix arg:scales arguments arg arg arg arg Assign Call If Compare Assign Call Assign Call FunctionDef name:visible arg:x arg:axis arguments arg arg Return return:yes BoolOp Compare Call Call Call If BoolOp Compare Call Call Call If Compare Call Return return:yes Call Assign Call Return return:yes Call If BoolOp Compare Call Compare Return return:yes Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_libstdcpp_version", + "source_code": "def get_libstdcpp_version():\n key = 'libstdcpp_ver'\n out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])\n if err and FLAGS.debug:\n print('Error in detecting libstdc++ version:\\n %s' % str(err))\n ver = out.split(b'_')[-1].replace(b'\\n', b'')\n return ver", + "docstring": "Retrieves version of libstdc++ detected. Returns: String that is the version of libstdc++. e.g. 
'3.4.25'", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py", + "ast_data": "FunctionDef name:get_libstdcpp_version arguments Assign Assign Call Call If BoolOp Call Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "TypePromotionTable", + "source_code": "class TypePromotionTable:\n\n def __init__(self):\n self._rule_table = {}\n for rule in _GENERATED_ATEN_TYPE_PROMOTION_RULE_SET:\n self.add_rule(rule)\n for rule in _EXTRA_TYPE_PROMOTION_RULE_SET:\n self.add_rule(rule)\n\n def add_rule(self, rule: TypePromotionRule) -> None:\n if not rule.is_valid():\n raise ValueError(f'Invalid type promotion rule: {rule}')\n self._rule_table[f'{rule.namespace}.{rule.op_name}'] = rule\n\n def get_rule(self, py_op: torch._ops.OpOverloadPacket) -> TypePromotionRule | None:\n return self._rule_table.get(str(py_op), None)", + "docstring": "Type promotion table for torch.ops.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py", + "ast_data": "ClassDef name:TypePromotionTable FunctionDef name:__init__ arg:self arguments arg Assign For Call For Call FunctionDef name:add_rule arg:self arg:rule arguments arg arg If Call Raise Call Assign FunctionDef name:get_rule arg:self arg:py_op arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_execution_from_debug_event_proto", + "source_code": "def _execution_from_debug_event_proto(debug_event, locator):\n execution_proto = debug_event.execution\n debug_tensor_values = None\n if execution_proto.tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR:\n pass\n elif execution_proto.tensor_debug_mode != debug_event_pb2.TensorDebugMode.NO_TENSOR:\n debug_tensor_values = []\n for tensor_proto in execution_proto.tensor_protos:\n debug_tensor_values.append(_parse_tensor_value(tensor_proto, return_list=True))\n return Execution(_execution_digest_from_debug_event_proto(debug_event, locator), execution_proto.code_location.host_name, tuple(execution_proto.code_location.stack_frame_ids), execution_proto.tensor_debug_mode, graph_id=execution_proto.graph_id, input_tensor_ids=tuple(execution_proto.input_tensor_ids), output_tensor_ids=tuple(execution_proto.output_tensor_ids), debug_tensor_values=_tuple_or_none(debug_tensor_values))", + "docstring": "Convert a DebugEvent proto into an Execution data object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:_execution_from_debug_event_proto arg:debug_event arg:locator arguments arg arg Assign Assign If Compare If Compare Assign For Call Call Return return:yes Call Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "benchmark_fused_nodes", + "source_code": "def benchmark_fused_nodes(self, nodes: Sequence[BaseSchedulerNode]) -> tuple[float, str]:\n raise NotImplementedError", + "docstring": "Benchmark fused list of nodes and return the execution time in milliseconds on randomly generated inputs.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:benchmark_fused_nodes arg:self arg:nodes arguments arg arg Raise" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "def fit(self, X, y, **fit_params):\n _raise_for_params(fit_params, self, 'fit', allow=['sample_weight'])\n check_classification_targets(y)\n if type_of_target(y) == 'multilabel-indicator':\n 
self._label_encoder = [LabelEncoder().fit(yk) for yk in y.T]\n self.classes_ = [le.classes_ for le in self._label_encoder]\n y_encoded = np.array([self._label_encoder[target_idx].transform(target) for target_idx, target in enumerate(y.T)]).T\n else:\n self._label_encoder = LabelEncoder().fit(y)\n self.classes_ = self._label_encoder.classes_\n y_encoded = self._label_encoder.transform(y)\n return super().fit(X, y_encoded, **fit_params)", + "docstring": "Fit the estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target values. Note that will be internally encoded in numerically increasing order or lexicographic order. If the order matter (e.g. for ordinal regression), one should numerically encode the target before calling :term:. **fit_params : dict Parameters to pass to the underlying estimators. .. versionadded:: 1.6 Only available if , which can be set by using `Metadata Routing User Guide ` for more details. Returns ------- self : object Returns a fitted instance of estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Call Call If Compare Call Assign Call Call Assign Assign Call Call Call Assign Call Call Assign Assign Call Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "translatable", + "source_code": "class translatable(nodes.Node):\n\n def preserve_original_messages(self) -> None:\n raise NotImplementedError\n\n def apply_translated_message(self, original_message: str, translated_message: str) -> None:\n raise NotImplementedError\n\n def extract_original_messages(self) -> Sequence[str]:\n raise NotImplementedError", + "docstring": "Node which supports translation. The translation goes forward with following steps: 1. Preserve original translatable messages 2. Apply translated messages from message catalog 3. Extract preserved messages (for gettext builder) The translatable nodes MUST preserve original messages. And these messages should not be overridden at applying step. Because they are used at final step; extraction.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:translatable FunctionDef name:preserve_original_messages arg:self arguments arg Raise FunctionDef name:apply_translated_message arg:self arg:original_message arg:translated_message arguments arg arg arg Raise FunctionDef name:extract_original_messages arg:self arguments arg Raise" + }, + { + "library": "tensorflow", + "name": "MaxPooling2D", + "source_code": "class MaxPooling2D(keras_layers.MaxPooling2D, base.Layer):\n\n def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n if strides is None:\n raise ValueError('Argument `strides` must not be None.')\n super(MaxPooling2D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)", + "docstring": "Max pooling layer for 2D inputs (e.g. images). Args: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. 
Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. (default) and are supported. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py", + "ast_data": "ClassDef name:MaxPooling2D FunctionDef name:__init__ arg:self arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg arg If Compare Raise Call Call Call" + }, + { + "library": "kornia", + "name": "_mean_isotropic_scale_normalize", + "source_code": "def _mean_isotropic_scale_normalize(points: torch.Tensor, eps: float=1e-08) -> Tuple[torch.Tensor, torch.Tensor]:\n KORNIA_CHECK_SHAPE(points, ['B', 'N', 'D'])\n x_mean = torch.mean(points, dim=1, keepdim=True)\n scale = (points - x_mean).norm(dim=-1, p=2).mean(dim=-1)\n D_int = points.shape[-1]\n D_float = torch.tensor(points.shape[-1], dtype=torch.float64, device=points.device)\n scale = torch.sqrt(D_float) / (scale + eps)\n transform = eye_like(D_int + 1, points)\n idxs = arange(D_int, dtype=torch.int64, device=points.device)\n transform[:, idxs, idxs] = transform[:, idxs, idxs] * scale[:, None]\n transform[:, idxs, D_int] = transform[:, idxs, D_int] + -scale[:, None] * x_mean[:, 0, idxs]\n points_norm = transform_points(transform, points)\n return (points_norm, transform)", + "docstring": "Normalize points. Args: points : Tensor containing the points to be normalized with shape :math:. eps : Small value to avoid division by zero error. Returns: Tuple containing the normalized points in the shape :math: and the transformation matrix in the shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\calibration\\pnp.py", + "ast_data": "FunctionDef name:_mean_isotropic_scale_normalize arg:points arg:eps arguments arg arg Call Assign Call Assign Call Call Assign Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_logpdf", + "source_code": "def _logpdf(self, x, dim, mu, kappa):\n x = np.asarray(x)\n self._check_data_vs_dist(x, dim)\n dotproducts = np.einsum('i,...i->...', mu, x)\n return self._log_norm_factor(dim, kappa) + kappa * dotproducts", + "docstring": "Log of the von Mises-Fisher probability density function. As this function does no argument checking, it should not be called directly; use 'logpdf' instead.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:_logpdf arg:self arg:x arg:dim arg:mu arg:kappa arguments arg arg arg arg arg Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "set_yi", + "source_code": "def set_yi(self, yi, axis=None):\n if yi is None:\n self.yi = None\n return\n self._set_yi(yi, xi=self.xi, axis=axis)\n self.yi = self._reshape_yi(yi)\n self.n, self.r = self.yi.shape\n self._diff_baryint = None", + "docstring": "Update the y values to be interpolated The barycentric interpolation algorithm requires the calculation of weights, but these depend only on the . The can be changed at any time. Parameters ---------- yi : array_like The y-coordinates of the points the polynomial will pass through. If None, the y values must be supplied later. 
axis : int, optional Axis in the array corresponding to the x-coordinate values.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_polyint.py", + "ast_data": "FunctionDef name:set_yi arg:self arg:yi arg:axis arguments arg arg arg If Compare Assign Return return:no Call Assign Call Assign Assign" + }, + { + "library": "scikit-learn", + "name": "Options", + "source_code": "class Options(_Constraint):\n\n def __init__(self, type, options, *, deprecated=None):\n super().__init__()\n self.type = type\n self.options = options\n self.deprecated = deprecated or set()\n if self.deprecated - self.options:\n raise ValueError('The deprecated options must be a subset of the options.')\n\n def is_satisfied_by(self, val):\n return isinstance(val, self.type) and val in self.options\n\n def _mark_if_deprecated(self, option):\n option_str = f'{option!r}'\n if option in self.deprecated:\n option_str = f'{option_str} (deprecated)'\n return option_str\n\n def __str__(self):\n options_str = f'{', '.join([self._mark_if_deprecated(o) for o in self.options])}'\n return f'a {_type_name(self.type)} among {{{options_str}}}'", + "docstring": "Constraint representing a finite set of instances of a given type. Parameters ---------- type : type options : set The set of valid scalars. deprecated : set or None, default=None A subset of the to mark as deprecated in the string representation of the constraint.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py", + "ast_data": "ClassDef name:Options FunctionDef name:__init__ arg:self arg:type arg:options arguments arg arg arg arg Call Call Assign Assign Assign BoolOp Call If Raise Call FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes BoolOp Call Compare FunctionDef name:_mark_if_deprecated arg:self arg:option arguments arg arg Assign If Compare Assign Return return:yes FunctionDef name:__str__ arg:self arguments arg Assign Call Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "_parse_policy_maxopt", + "source_code": "def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags):\n if self.cc_has_debug:\n self.dist_log(\"debug mode is detected, policy 'maxopt' is skipped.\")\n elif self.cc_noopt:\n self.dist_log(\"optimization is disabled, policy 'maxopt' is skipped.\")\n else:\n flags = self.cc_flags['opt']\n if not flags:\n self.dist_log(\"current compiler doesn't support optimization flags, policy 'maxopt' is skipped\", stderr=True)\n else:\n extra_flags += flags\n return (has_baseline, final_targets, extra_flags)", + "docstring": "append the compiler optimization flags", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py", + "ast_data": "FunctionDef name:_parse_policy_maxopt arg:self arg:has_baseline arg:final_targets arg:extra_flags arguments arg arg arg arg If Call If Call Assign If Call Return return:yes" + }, + { + "library": "numpy", + "name": "__gt__", + "source_code": "def __gt__(self, other):\n return greater(self, other)", + "docstring": "Return (self > other) element-wise. 
See Also -------- greater", + "type": "method", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:__gt__ arg:self arg:other arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, ax, label, initial='', *, color='.95', hovercolor='1', label_pad=0.01, textalignment='left'):\n super().__init__(ax)\n self._text_position = _api.check_getitem({'left': 0.05, 'center': 0.5, 'right': 0.95}, textalignment=textalignment)\n self.label = ax.text(-label_pad, 0.5, label, transform=ax.transAxes, verticalalignment='center', horizontalalignment='right')\n self.text_disp = self.ax.text(self._text_position, 0.5, initial, transform=self.ax.transAxes, verticalalignment='center', horizontalalignment=textalignment, parse_math=False)\n self._observers = cbook.CallbackRegistry(signals=['change', 'submit'])\n ax.set(xlim=(0, 1), ylim=(0, 1), navigate=False, facecolor=color, xticks=[], yticks=[])\n self.cursor_index = 0\n self.cursor = ax.vlines(0, 0, 0, visible=False, color='k', lw=1, transform=mpl.transforms.IdentityTransform())\n self.connect_event('button_press_event', self._click)\n self.connect_event('button_release_event', self._release)\n self.connect_event('motion_notify_event', self._motion)\n self.connect_event('key_press_event', self._keypress)\n self.connect_event('resize_event', self._resize)\n self.color = color\n self.hovercolor = hovercolor\n self.capturekeystrokes = False", + "docstring": "Parameters ---------- ax : The instance the button will be placed into. label : str Label for this text box. initial : str Initial value in the text box. color : :mpltype: The color of the box. hovercolor : :mpltype: The color of the box when the mouse is over it. label_pad : float The distance between the label and the right side of the textbox. textalignment : {'left', 'center', 'right'} The horizontal location of the text.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:ax arg:label arg:initial arguments arg arg arg arg arg arg arg arg Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Assign Call Call Call Call Call Call Call Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, node_def, op, message, *args):\n super(DeadlineExceededError, self).__init__(node_def, op, message, DEADLINE_EXCEEDED, *args)", + "docstring": "Creates a .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call" + }, + { + "library": "pytorch", + "name": "_type_repr", + "source_code": "def _type_repr(obj: object) -> str:\n if isinstance(obj, type) and (not isinstance(obj, types.GenericAlias)):\n if obj.__module__ == 'builtins':\n return obj.__qualname__\n return f'{obj.__module__}.{obj.__qualname__}'\n if obj is ...:\n return '...'\n if isinstance(obj, types.FunctionType):\n return obj.__name__\n return repr(obj)", + "docstring": "Return the repr() of an object, special-casing types (internal helper). If obj is a type, we return a shorter version than the default type.__repr__, based on the module and qualified name, which is typically enough to uniquely identify a type. 
For everything else, we fall back on repr(obj).", + "type": "function", + "file_path": "pytorch\\torch\\fx\\node.py", + "ast_data": "FunctionDef name:_type_repr arg:obj arguments arg If BoolOp Call Call If Compare Return return:yes Return return:yes If Compare Return return:yes If Call Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_retrieve_variables_impl", + "source_code": "@def_function.function\ndef _retrieve_variables_impl(config: Text, hosts: List[Tuple[int, Text]], variables: Dict[Text, Dict[Text, tf_variables.Variable]], table_config: tpu_embedding_v2_utils.TableConfig):\n for host_id, host in enumerate(hosts):\n with ops.device(host):\n for table in table_config:\n retrieved = table.optimizer._retrieve()(table_name=table.name, num_shards=len(hosts), shard_id=host_id, config=config)\n if not isinstance(retrieved, tuple):\n retrieved = (retrieved,)\n for i, slot in enumerate(['parameters'] + table.optimizer._slot_names()):\n sharded_var = variables[table.name][slot]\n if host_id < len(sharded_var.variables):\n sharded_var.variables[host_id].assign(retrieved[i])\n config = None", + "docstring": "Retrieve embedding tables from TPU to host memory. Args: config: A serialized TPUEmbeddingConfiguration proto. hosts: A list of all the host CPU devices. variables: A dictionary of dictionaries of TPUEmbeddingVariables. First key is the table name, second key is 'parameters' or the optimizer slot name. table_config: A list of tf.tpu.experimental.embedding.TableConfig objects.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py", + "ast_data": "FunctionDef name:_retrieve_variables_impl arg:config arg:hosts arg:variables arg:table_config arguments arg arg arg arg For Call With Call For Assign Call Call Call If Call Assign For Call Call Assign If Compare Call Call Assign" + }, + { + "library": "pytorch", + "name": "remove_node", + "source_code": "def remove_node(model: GraphModule, node: Node, prev_node: Node):\n orig_users = list(node.users.keys())\n for user_node in orig_users:\n user_node.replace_input_with(node, prev_node)\n model.graph.erase_node(node)", + "docstring": "Removes the given node from the model by replacing all of its users with the given previous node", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", + "ast_data": "FunctionDef name:remove_node arg:model arg:node arg:prev_node arguments arg arg arg Assign Call Call For Call Call" + }, + { + "library": "authlib", + "name": "validate_sector_identifier_uri", + "source_code": "def validate_sector_identifier_uri(self):\n self._validate_uri('sector_identifier_uri')", + "docstring": "URL using the https scheme to be used in calculating Pseudonymous Identifiers by the OP. The URL references a file with a single JSON array of redirect_uri values. Please see Section 5. 
Providers that use pairwise sub (subject) values SHOULD utilize the sector_identifier_uri value provided in the Subject Identifier calculation for pairwise identifiers.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\registration\\claims.py", + "ast_data": "FunctionDef name:validate_sector_identifier_uri arg:self arguments arg Call" + }, + { + "library": "tensorflow", + "name": "_get_module_wrapper", + "source_code": "def _get_module_wrapper(module: str, output_dir: str, output_package: str, api_version: int, symbols_by_module: Mapping[str, set[_Entrypoint]], use_lazy_loading: bool) -> str:\n if api_version != 1 and (not use_lazy_loading):\n return ''\n deprecated = 'False'\n has_lite = 'False'\n public_apis_name = 'None'\n if api_version == 1 and (not output_dir.strip('/').endswith('compat/v1')):\n deprecated = 'True'\n if 'lite' in symbols_by_module and use_lazy_loading:\n has_lite = 'True'\n if use_lazy_loading:\n public_apis_name = '_PUBLIC_APIS'\n return _DEPRECATION_FOOTER % (module.removeprefix(output_package).strip('.'), public_apis_name, deprecated, has_lite)", + "docstring": "Returns the module wrapper for the given module.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\generator\\generator.py", + "ast_data": "FunctionDef name:_get_module_wrapper arg:module arg:output_dir arg:output_package arg:api_version arg:symbols_by_module arg:use_lazy_loading arguments arg arg arg arg arg arg If BoolOp Compare Return return:yes Assign Assign Assign If BoolOp Compare Call Call Assign If BoolOp Compare Assign If Assign Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "fast_forward", + "source_code": "def fast_forward(self, n: IntNumber) -> 'Sobol':\n if self.num_generated == 0:\n _fast_forward(n=n - 1, num_gen=self.num_generated, dim=self.d, sv=self._sv, quasi=self._quasi)\n else:\n _fast_forward(n=n, num_gen=self.num_generated - 1, dim=self.d, sv=self._sv, quasi=self._quasi)\n self.num_generated += n\n return self", + "docstring": "Fast-forward the sequence by positions. Parameters ---------- n : int Number of points to skip in the sequence. Returns ------- engine : Sobol The fast-forwarded engine.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_qmc.py", + "ast_data": "FunctionDef name:fast_forward arg:self arg:n arguments arg arg If Compare Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_iter_column_arrays", + "source_code": "def _iter_column_arrays(self) -> Iterator[ArrayLike]:\n for i in range(len(self.columns)):\n yield self._get_column_array(i)", + "docstring": "Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! 
The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes).", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:_iter_column_arrays arg:self arguments arg For Call Call Call" + }, + { + "library": "cherrypy", + "name": "kwargs", + "source_code": "@kwargs.setter\ndef kwargs(self, kwargs):\n cherrypy.serving.request.kwargs = kwargs\n return cherrypy.serving.request.kwargs", + "docstring": "Set the named request keyword arguments as :class:.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpdispatch.py", + "ast_data": "FunctionDef name:kwargs arg:self arg:kwargs arguments arg arg Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_open_ring_2d", + "source_code": "def _open_ring_2d(x_size: int, y_size: int, z_coord: int) -> List[Tuple[int, int, int]]:\n ret = []\n for i in range(y_size // 2):\n for j in range(1, x_size):\n ret.append((j, 2 * i, z_coord))\n for j in range(x_size - 1, 0, -1):\n ret.append((j, 2 * i + 1, z_coord))\n for i in range(y_size - 1, 0, -1):\n ret.append((0, i, z_coord))\n return ret", + "docstring": "Ring-order of a X by Y mesh, with a fixed Z coordinate. For example, in a 4x4 mesh, this returns the following order. 0 -- 1 -- 2 -- 3 | | | | 15-- 6 -- 5 -- 4 | | | | 14-- 7 -- 8 -- 9 | | | | 13-- 12-- 11-- 10 Note that chip 0 is not included in the output. Args: x_size: An integer represents the mesh size in the x-dimension. Must be larger than 1. y_size: An integer represents the mesh size in the y-dimension. Must be larger than 1. z_coord: An integer represents the z-coordinate to use for the chips in the ring. Returns: A list of (x,y,z) triples in ring order.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\device_assignment.py", + "ast_data": "FunctionDef name:_open_ring_2d arg:x_size arg:y_size arg:z_coord arguments arg arg arg Assign For Call For Call Call For Call Call For Call Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "report", + "source_code": "@cherrypy.expose\ndef report(self, name):\n filename, statements, excluded, missing, _ = self.coverage.analysis2(name)\n pc = _percent(statements, missing)\n yield (TEMPLATE_COVERAGE % dict(name=os.path.basename(name), fullpath=name, pc=pc))\n yield '\\n'\n for line in self.annotated_file(filename, statements, excluded, missing):\n yield line\n yield '
'\n yield ''\n yield ''", + "docstring": "Render coverage stats as HTML.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\covercp.py", + "ast_data": "FunctionDef name:report arg:self arg:name arguments arg arg Assign Call Assign Call Call Call For Call" + }, + { + "library": "django", + "name": "get_action_choices", + "source_code": "def get_action_choices(self, request, default_choices=models.BLANK_CHOICE_DASH):\n choices = [*default_choices]\n for func, name, description in self.get_actions(request).values():\n choice = (name, description % model_format_dict(self.opts))\n choices.append(choice)\n return choices", + "docstring": "Return a list of choices for use in a form object. Each choice is a tuple (name, description).", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:get_action_choices arg:self arg:request arg:default_choices arguments arg arg arg Assign For Call Call Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "blend_overlay", + "source_code": "def blend_overlay(self, rgb, intensity):\n low = 2 * intensity * rgb\n high = 1 - 2 * (1 - intensity) * (1 - rgb)\n return np.where(rgb <= 0.5, low, high)", + "docstring": "Combine an RGB image with an intensity map using \"overlay\" blending. Parameters ---------- rgb : An (M, N, 3) RGB array of floats ranging from 0 to 1 (color image). intensity : An (M, N, 1) array of floats ranging from 0 to 1 (grayscale image). Returns ------- ndarray An (M, N, 3) RGB array representing the combined images.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:blend_overlay arg:self arg:rgb arg:intensity arguments arg arg arg Assign Assign Return return:yes Call Compare" + }, + { + "library": "matplotlib", + "name": "get_offset", + "source_code": "@_compat_get_offset\ndef get_offset(self, bbox, renderer):\n return self._offset(bbox.width, bbox.height, -bbox.x0, -bbox.y0, renderer) if callable(self._offset) else self._offset", + "docstring": "Return the offset as a tuple (x, y). The extent parameters have to be provided to handle the case where the offset is dynamically determined by a callable (see ). Parameters ---------- bbox : renderer : subclass", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "FunctionDef name:get_offset arg:self arg:bbox arg:renderer arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_m_step", + "source_code": "@abstractmethod\ndef _m_step(self, X, log_resp):\n pass", + "docstring": "M step. 
Parameters ---------- X : array-like of shape (n_samples, n_features) log_resp : array-like of shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\mixture\\_base.py", + "ast_data": "FunctionDef name:_m_step arg:self arg:X arg:log_resp arguments arg arg arg" + }, + { + "library": "kornia", + "name": "solve_quadratic", + "source_code": "def solve_quadratic(coeffs: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(coeffs, ['B', '3'])\n a = coeffs[:, 0]\n b = coeffs[:, 1]\n c = coeffs[:, 2]\n delta = b * b - 4 * a * c\n mask_negative = delta < 0\n mask_zero = delta == 0\n inv_2a = 0.5 / a\n solutions = zeros((coeffs.shape[0], 2), device=coeffs.device, dtype=coeffs.dtype)\n if torch.any(mask_zero):\n solutions[mask_zero, 0] = -b[mask_zero] * inv_2a[mask_zero]\n solutions[mask_zero, 1] = solutions[mask_zero, 0]\n sqrt_delta = torch.sqrt(delta)\n mask = torch.bitwise_and(~mask_negative, ~mask_zero)\n if torch.any(mask):\n solutions[mask, 0] = (-b[mask] + sqrt_delta[mask]) * inv_2a[mask]\n solutions[mask, 1] = (-b[mask] - sqrt_delta[mask]) * inv_2a[mask]\n return solutions", + "docstring": "Solve given quadratic equation. The function takes the coefficients of quadratic equation and returns the real roots. .. math:: coeffs[0]x^2 + coeffs[1]x + coeffs[2] = 0 Args: coeffs : The coefficients of quadratic equation : Returns: A tensor of shape containing the real roots to the quadratic equation. Example: >>> coeffs = torch.tensor([[1., 4., 4.]]) >>> roots = solve_quadratic(coeffs) .. note:: In cases where a quadratic polynomial has only one real root, the output will be in the format [real_root, 0]. And for the complex roots should be represented as 0. This is done to maintain a consistent output shape for all cases.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\solvers\\polynomial_solver.py", + "ast_data": "FunctionDef name:solve_quadratic arg:coeffs arguments arg Call Assign Assign Assign Assign Assign Compare Assign Compare Assign Assign Call If Call Assign Assign Assign Call Assign Call If Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "slice_input_producer", + "source_code": "@tf_export(v1=['train.slice_input_producer'])\n@deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.from_tensor_slices(tuple(tensor_list)).shuffle(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If `shuffle=False`, omit the `.shuffle(...)`.')\ndef slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None, capacity=32, shared_name=None, name=None):\n with ops.name_scope(name, 'input_producer', tensor_list):\n tensor_list = indexed_slices.convert_n_to_tensor_or_indexed_slices(tensor_list)\n if not tensor_list:\n raise ValueError('Expected at least one tensor in slice_input_producer().')\n range_size = array_ops.shape(tensor_list[0])[0]\n queue = range_input_producer(range_size, num_epochs=num_epochs, shuffle=shuffle, seed=seed, capacity=capacity, shared_name=shared_name)\n index = queue.dequeue()\n output = [array_ops.gather(t, index) for t in tensor_list]\n return output", + "docstring": "Produces a slice of each in . Implemented using a Queue -- a for the Queue is added to the current 's collection. Args: tensor_list: A list of objects. Every in must have the same size in the first dimension. num_epochs: An integer (optional). 
If specified, produces each slice times before generating an error. If not specified, can cycle through the slices an unlimited number of times. shuffle: Boolean. If true, the integers are randomly shuffled within each epoch. seed: An integer (optional). Seed used if shuffle == True. capacity: An integer. Sets the queue capacity. shared_name: (optional). If set, this queue will be shared under the given name across multiple sessions. name: A name for the operations (optional). Returns: A list of tensors, one for each element of . If the tensor in has shape , then the corresponding output tensor will have shape . Raises: ValueError: if produces nothing from . @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the API to ingest data under eager execution. @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\input.py", + "ast_data": "FunctionDef name:slice_input_producer arg:tensor_list arg:num_epochs arg:shuffle arg:seed arg:capacity arg:shared_name arg:name arguments arg arg arg arg arg arg arg With Call Assign Call If Raise Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "clear_safe_globals", + "source_code": "def clear_safe_globals() -> None:\n _weights_only_unpickler._clear_safe_globals()", + "docstring": "Clears the list of globals that are safe for `` load.", + "type": "function", + "file_path": "pytorch\\torch\\serialization.py", + "ast_data": "FunctionDef name:clear_safe_globals arguments Call" + }, + { + "library": "tensorflow", + "name": "real_dtype", + "source_code": "@property\ndef real_dtype(self):\n base = self.base_dtype\n if base == complex64:\n return float32\n elif base == complex128:\n return float64\n else:\n return self", + "docstring": "Returns the corresponding to this 's real part.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py", + "ast_data": "FunctionDef name:real_dtype arg:self arguments arg Assign If Compare Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_merge_tensor_signatures", + "source_code": "def _merge_tensor_signatures(self, signatures):\n sorted_update = []\n if self._num_signature_dimensions() > 1:\n signature_indices = self._signature_types()\n for _, val in sorted(signatures.items(), key=lambda item: signature_indices[item[0]]):\n sorted_update.append(val)\n updates = array_ops_stack.stack(sorted_update, axis=0, name='merge_single_op_signatures')\n elif self._num_signature_dimensions() == 1:\n (_, val), = signatures.items()\n updates = val\n else:\n raise ValueError('Cannot merge 0 signatures. Check the value passed for flag --signatures.')\n return updates", + "docstring": "Returns a tensor that merges the given signatures. Args: signatures: A dictionary of the signature updates from signature name to a tensor of dimension [1]. Returns: A tensor that concats the signature values in a predefined order. 
Raises: ValueError: Unable to merge signatures.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:_merge_tensor_signatures arg:self arg:signatures arguments arg arg Assign If Compare Call Assign Call For Call Call arguments arg Call Assign Call If Compare Call Assign Call Assign Raise Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_tool_keymap", + "source_code": "def get_tool_keymap(self, name):\n keys = [k for k, i in self._keys.items() if i == name]\n return keys", + "docstring": "Return the keymap associated with the specified tool. Parameters ---------- name : str Name of the Tool. Returns ------- list of str List of keys associated with the tool.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py", + "ast_data": "FunctionDef name:get_tool_keymap arg:self arg:name arguments arg arg Assign Call Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "CompatV1ImportReplacer", + "source_code": "class CompatV1ImportReplacer(ast.NodeVisitor):\n\n def visit_Import(self, node):\n for import_alias in node.names:\n if import_alias.name == 'tensorflow.compat.v1' and import_alias.asname == 'tf':\n import_alias.name = 'tensorflow'\n self.generic_visit(node)", + "docstring": "AST Visitor that replaces . Converts to", + "type": "class", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py", + "ast_data": "ClassDef name:CompatV1ImportReplacer FunctionDef name:visit_Import arg:self arg:node arguments arg arg For If BoolOp Compare Compare Assign Call" + }, + { + "library": "seaborn", + "name": "theme", + "source_code": "def theme(self, config: Mapping[str, Any], /) -> Plot:\n new = self._clone()\n rc = mpl.RcParams(config)\n new._theme.update(rc)\n return new", + "docstring": "Control the appearance of elements in the plot. .. note:: The API for customizing plot appearance is not yet finalized. Currently, the only valid argument is a dict of matplotlib rc parameters. (This dict must be passed as a positional argument.) It is likely that this method will be enhanced in future releases. Matplotlib rc parameters are documented on the following page: Examples -------- .. include:: ../docstrings/objects.Plot.theme.rst", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\plot.py", + "ast_data": "FunctionDef name:theme arguments arg arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_deduplicate_indexed_slices", + "source_code": "def _deduplicate_indexed_slices(values, indices):\n unique_indices, new_index_positions = array_ops.unique(indices)\n summed_values = math_ops.unsorted_segment_sum(values, new_index_positions, array_ops.shape(unique_indices)[0])\n return (summed_values, unique_indices)", + "docstring": "Sums associated with any non-unique . Args: values: A with rank >= 1. indices: A one-dimensional integer , indexing into the first dimension of (as in an IndexedSlices object). 
Returns: A tuple of (, ) where is a de-duplicated version of and contains the sum of slices associated with each unique index.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", + "ast_data": "FunctionDef name:_deduplicate_indexed_slices arg:values arg:indices arguments arg arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_action_env_var", + "source_code": "def set_action_env_var(environ_cp, var_name, query_item, enabled_by_default, question=None, yes_reply=None, no_reply=None, bazel_config_name=None):\n var = int(get_var(environ_cp, var_name, query_item, enabled_by_default, question, yes_reply, no_reply))\n if not bazel_config_name:\n write_action_env_to_bazelrc(var_name, var)\n elif var:\n write_to_bazelrc('build --config=%s' % bazel_config_name)\n environ_cp[var_name] = str(var)", + "docstring": "Set boolean action_env variable. Ask user if query_item will be enabled. Default is used if no input is given. Set environment variable and write to .bazelrc. Args: environ_cp: copy of the os.environ. var_name: string for name of environment variable, e.g. \"TF_NEED_CUDA\". query_item: string for feature related to the variable, e.g. \"CUDA for Nvidia GPUs\". enabled_by_default: boolean for default behavior. question: optional string for how to ask for user input. yes_reply: optional string for reply when feature is enabled. no_reply: optional string for reply when feature is disabled. bazel_config_name: adding config to .bazelrc instead of action_env.", + "type": "function", + "file_path": "tensorflow\\configure.py", + "ast_data": "FunctionDef name:set_action_env_var arg:environ_cp arg:var_name arg:query_item arg:enabled_by_default arg:question arg:yes_reply arg:no_reply arg:bazel_config_name arguments arg arg arg arg arg arg arg arg Assign Call Call If Call If Call Assign Call" + }, + { + "library": "pytorch", + "name": "placements", + "source_code": "@property\ndef placements(self) -> tuple[Placement, ...]:\n return self._spec.placements", + "docstring": "The placements attribute of this DTensor that describes the layout of this DTensor on the its DeviceMesh. .. note:: `` is a read-only property, it can not be set.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py", + "ast_data": "FunctionDef name:placements arg:self arguments arg Return return:yes" + }, + { + "library": "authlib", + "name": "authenticate_client", + "source_code": "def authenticate_client(self, request):\n client = self.server.authenticate_client(request, self.CLIENT_AUTH_METHODS, self.ENDPOINT_NAME)\n request.client = client\n return client", + "docstring": "client_id is REQUIRED **if the client is not** authenticating with the authorization server as described in Section 3.2.1. of [RFC6749]. This means the endpoint support \"none\" authentication method. 
In this case, this endpoint's auth methods are: - client_secret_basic - client_secret_post - none Developers change the value of `` auth method CLIENT_AUTH_METHODS = [\"client_secret_basic\"]", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc8628\\endpoint.py", + "ast_data": "FunctionDef name:authenticate_client arg:self arg:request arguments arg arg Assign Call Assign Return return:yes" + }, + { + "library": "django", + "name": "SerializationError", + "source_code": "class SerializationError(Exception):\n pass", + "docstring": "Something bad happened during serialization.", + "type": "class", + "file_path": "django\\django\\core\\serializers\\base.py", + "ast_data": "ClassDef name:SerializationError" + }, + { + "library": "pytorch", + "name": "backward_transitive_closure", + "source_code": "def backward_transitive_closure(self, src: str) -> set[str]:\n result = set(src)\n working_set = deque(src)\n while len(working_set) > 0:\n cur = working_set.popleft()\n for n in self.predecessors(cur):\n if n not in result:\n result.add(n)\n working_set.append(n)\n return result", + "docstring": "Returns a set of nodes that are reachable from src in reverse direction", + "type": "method", + "file_path": "pytorch\\torch\\package\\_digraph.py", + "ast_data": "FunctionDef name:backward_transitive_closure arg:self arg:src arguments arg arg Assign Call Assign Call While Compare Call Assign Call For Call If Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_variance_scale_term", + "source_code": "def _variance_scale_term(self):\n return math_ops.rsqrt(1.0 + self.total_concentration[..., array_ops.newaxis])", + "docstring": "Helper to and which computes a shared scale.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet.py", + "ast_data": "FunctionDef name:_variance_scale_term arg:self arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "to_onnx", + "source_code": "def to_onnx(self, onnx_name: Optional[str]=None, image_size: Optional[int]=640, include_pre_and_post_processor: bool=True, save: bool=True, additional_metadata: Optional[list[tuple[str, str]]]=None, **kwargs: Any) -> onnx.ModelProto:\n if onnx_name is None:\n onnx_name = f'kornia_{self.name}_{image_size}.onnx'\n return super().to_onnx(onnx_name, input_shape=[-1, 3, image_size or -1, image_size or -1], output_shape=[-1, -1, 6], pseudo_shape=[1, 3, image_size or 352, image_size or 352], model=self if include_pre_and_post_processor else self.model, save=save, additional_metadata=additional_metadata, **kwargs)", + "docstring": "Export an RT-DETR object detection model to ONNX format. Either or must be provided. If neither is provided, a default pretrained model () will be built. Args: onnx_name: The name of the output ONNX file. If not provided, a default name in the format \"Kornia-.onnx\" will be used. image_size: The size to which input images will be resized during preprocessing. If None, image_size will be dynamic. For RTDETR, recommended scales include [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]. include_pre_and_post_processor: Whether to include the pre-processor and post-processor in the exported model. save: If to save the model or load it. additional_metadata: Additional metadata to add to the ONNX model. 
kwargs: Additional arguments to convert to onnx.", + "type": "method", + "file_path": "kornia\\kornia\\models\\detection\\base.py", + "ast_data": "FunctionDef name:to_onnx arg:self arg:onnx_name arg:image_size arg:include_pre_and_post_processor arg:save arg:additional_metadata arguments arg arg arg arg arg arg arg If Compare Assign Return return:yes Call Call BoolOp BoolOp BoolOp BoolOp" + }, + { + "library": "django", + "name": "get_nodes_by_type", + "source_code": "def get_nodes_by_type(self, nodetype):\n nodes = []\n for node in self:\n nodes.extend(node.get_nodes_by_type(nodetype))\n return nodes", + "docstring": "Return a list of all nodes of the given type", + "type": "method", + "file_path": "django\\django\\template\\base.py", + "ast_data": "FunctionDef name:get_nodes_by_type arg:self arg:nodetype arguments arg arg Assign For Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_add_feature", + "source_code": "def _add_feature(self, key, feature):\n if isinstance(feature, VarLenFeature):\n self._add_varlen_feature(key, feature)\n elif isinstance(feature, SparseFeature):\n self._add_sparse_feature(key, feature)\n elif isinstance(feature, FixedLenFeature):\n self._add_fixed_len_feature(key, feature)\n elif isinstance(feature, FixedLenSequenceFeature):\n self._add_fixed_len_sequence_feature(key, feature)\n elif isinstance(feature, RaggedFeature):\n self._add_ragged_feature(key, feature)\n else:\n raise ValueError(f'Invalid feature {key}:{feature}.')", + "docstring": "Adds the specified feature to this ParseOpParams.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py", + "ast_data": "FunctionDef name:_add_feature arg:self arg:key arg:feature arguments arg arg arg If Call Call If Call Call If Call Call If Call Call If Call Call Raise Call" + }, + { + "library": "tensorflow", + "name": "smart_cond", + "source_code": "@tf_export('__internal__.smart_cond.smart_cond', v1=[])\ndef smart_cond(pred, true_fn=None, false_fn=None, name=None):\n if not callable(true_fn):\n raise TypeError(f'Argument `true_fn` must be callable. Received {true_fn}')\n if not callable(false_fn):\n raise TypeError(f'Argument `false_fn` must be callable. Received {false_fn}')\n pred_value = smart_constant_value(pred)\n if pred_value is not None:\n if pred_value:\n return true_fn()\n else:\n return false_fn()\n else:\n return cond.cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)", + "docstring": "Return either if predicate is true else . If is a bool or has a constant value, we return either or , otherwise we use to dynamically route to both. Args: pred: A scalar determining whether to return the result of or . true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. name: Optional name prefix when using . Returns: Tensors returned by the call to either or . 
Raises: TypeError: If or is not callable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\smart_cond.py", + "ast_data": "FunctionDef name:smart_cond arg:pred arg:true_fn arg:false_fn arg:name arguments arg arg arg arg If Call Raise Call If Call Raise Call Assign Call If Compare If Return return:yes Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "f_ishigami", + "source_code": "def f_ishigami(x: 'npt.ArrayLike') -> 'npt.NDArray[np.inexact[Any]]':\n x = np.atleast_2d(x)\n f_eval = np.sin(x[0]) + 7 * np.sin(x[1]) ** 2 + 0.1 * x[2] ** 4 * np.sin(x[0])\n return f_eval", + "docstring": "Ishigami function. .. math:: Y(\\mathbf{x}) = \\sin x_1 + 7 \\sin^2 x_2 + 0.1 x_3^4 \\sin x_1 with :math:. Parameters ---------- x : array_like ([x1, x2, x3], n) Returns ------- f : array_like (n,) Function evaluation. References ---------- .. [1] Ishigami, T. and T. Homma. \"An importance quantification technique in uncertainty analysis for computer models.\" IEEE, :doi:, 1990.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_sensitivity_analysis.py", + "ast_data": "FunctionDef name:f_ishigami arg:x arguments arg Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_offset_transform", + "source_code": "def _offset_transform(self, renderer):\n return mtransforms.Affine2D().translate(*map(renderer.points_to_pixels, self._offset))", + "docstring": "Apply the offset to the given transform.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py", + "ast_data": "FunctionDef name:_offset_transform arg:self arg:renderer arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "_get_updates", + "source_code": "def _get_updates(self, grads):\n self.t += 1\n self.ms = [self.beta_1 * m + (1 - self.beta_1) * grad for m, grad in zip(self.ms, grads)]\n self.vs = [self.beta_2 * v + (1 - self.beta_2) * grad ** 2 for v, grad in zip(self.vs, grads)]\n self.learning_rate = self.learning_rate_init * np.sqrt(1 - self.beta_2 ** self.t) / (1 - self.beta_1 ** self.t)\n updates = [-self.learning_rate * m / (np.sqrt(v) + self.epsilon) for m, v in zip(self.ms, self.vs)]\n return updates", + "docstring": "Get the values used to update params with given gradients Parameters ---------- grads : list, length = len(coefs_) + len(intercepts_) Containing gradients with respect to coefs_ and intercepts_ in MLP model. So length should be aligned with params Returns ------- updates : list, length = len(grads) The values to add to params", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neural_network\\_stochastic_optimizers.py", + "ast_data": "FunctionDef name:_get_updates arg:self arg:grads arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "to_pytimedelta", + "source_code": "def to_pytimedelta(self) -> np.ndarray:\n warnings.warn(f'The behavior of {type(self).__name__}.to_pytimedelta is deprecated, in a future version this will return a Series containing python datetime.timedelta objects instead of an ndarray. To retain the old behavior, call `np.array` on the result', FutureWarning, stacklevel=find_stack_level())\n return self._get_values().to_pytimedelta()", + "docstring": "Return an array of native :class: objects. Python's standard library uses a different representation timedelta's. 
This method converts a Series of pandas Timedeltas to format with the same length as the original Series. Returns ------- numpy.ndarray Array of 1D containing data with type. See Also -------- datetime.timedelta : A duration expressing the difference between two date, time, or datetime. Examples -------- >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit=\"D\")) >>> s 0 0 days 1 1 days 2 2 days 3 3 days 4 4 days dtype: timedelta64[ns] >>> s.dt.to_pytimedelta() array([datetime.timedelta(0), datetime.timedelta(days=1), datetime.timedelta(days=2), datetime.timedelta(days=3), datetime.timedelta(days=4)], dtype=object)", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\accessors.py", + "ast_data": "FunctionDef name:to_pytimedelta arg:self arguments arg Call Call Call Return return:yes Call Call" + }, + { + "library": "seaborn", + "name": "_violin_bw_backcompat", + "source_code": "def _violin_bw_backcompat(self, bw, bw_method):\n if bw is not deprecated:\n bw_method = bw\n msg = dedent(f'\\n\\n The `bw` parameter is deprecated in favor of `bw_method`/`bw_adjust`.\\n Setting `bw_method={bw!r}`, but please see docs for the new parameters\\n and update your code. This will become an error in seaborn v0.15.0.\\n ')\n warnings.warn(msg, FutureWarning, stacklevel=3)\n return bw_method", + "docstring": "Provide two cycles of backcompat for violin bandwidth parameterization.", + "type": "method", + "file_path": "seaborn\\seaborn\\categorical.py", + "ast_data": "FunctionDef name:_violin_bw_backcompat arg:self arg:bw arg:bw_method arguments arg arg arg If Compare Assign Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "EstimatorModeKeys", + "source_code": "class EstimatorModeKeys(object):\n TRAIN = 'train'\n EVAL = 'eval'\n PREDICT = 'infer'", + "docstring": "Standard names for Estimator model modes. The following standard keys are defined: * : training/fitting mode. * : testing/evaluation mode. * : predication/inference mode.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\mode_keys.py", + "ast_data": "ClassDef name:EstimatorModeKeys Assign Assign Assign" + }, + { + "library": "scikit-learn", + "name": "GroupsConsumerMixin", + "source_code": "class GroupsConsumerMixin(_MetadataRequester):\n __metadata_request__split = {'groups': True}", + "docstring": "A Mixin to ``. .. versionadded:: 1.3", + "type": "class", + "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", + "ast_data": "ClassDef name:GroupsConsumerMixin Assign" + }, + { + "library": "tensorflow", + "name": "visit_ImportFrom", + "source_code": "def visit_ImportFrom(self, node):\n if not node.module:\n self.generic_visit(node)\n return\n from_import = node.module\n for import_alias in node.names:\n full_module_name = '%s.%s' % (from_import, import_alias.name)\n full_import = (full_module_name, import_alias.asname)\n detection = self._api_analysis_spec.imports_to_detect.get(full_import, None)\n if detection:\n self.add_result(detection)\n self.add_log(detection.log_level, node.lineno, node.col_offset, detection.log_message)\n self.generic_visit(node)", + "docstring": "Handle visiting an import-from node in the AST. 
Args: node: Current Node", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py", + "ast_data": "FunctionDef name:visit_ImportFrom arg:self arg:node arguments arg arg If Call Return return:no Assign For Assign Assign Assign Call If Call Call Call" + }, + { + "library": "cryptography", + "name": "public_bytes", + "source_code": "@abc.abstractmethod\ndef public_bytes(self, encoding: _serialization.Encoding, format: _serialization.PublicFormat) -> bytes:\n pass", + "docstring": "Returns the key serialized as bytes.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py", + "ast_data": "FunctionDef name:public_bytes arg:self arg:encoding arg:format arguments arg arg arg" + }, + { + "library": "tensorflow", + "name": "graph", + "source_code": "@property\ndef graph(self):\n raise NotImplementedError('graph')", + "docstring": "The underlying TensorFlow graph, to be used in building Operations.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", + "ast_data": "FunctionDef name:graph arg:self arguments arg Raise Call" + }, + { + "library": "pytorch", + "name": "_get_slice_mesh_dims", + "source_code": "def _get_slice_mesh_dims(self, device_mesh, mesh_dim_names) -> list[tuple[int, ...]]:\n if device_mesh != self.get_root_mesh(device_mesh):\n raise RuntimeError('Cannot create a submesh from a submesh.')\n self.flatten_name_to_root_dims.setdefault(device_mesh, {})\n flatten_name_to_root_dims = self.flatten_name_to_root_dims[device_mesh]\n valid_mesh_dim_names = [*device_mesh.mesh_dim_names, *flatten_name_to_root_dims]\n if not all((mesh_dim_name in valid_mesh_dim_names for mesh_dim_name in mesh_dim_names)):\n raise KeyError(f'Invalid mesh_dim_names {mesh_dim_names} specified. Valid mesh_dim_names are {valid_mesh_dim_names}.')\n curr_idx = -1\n slice_mesh_dims = []\n for mesh_dim_name in mesh_dim_names:\n if mesh_dim_name in flatten_name_to_root_dims:\n mesh_indices = flatten_name_to_root_dims[mesh_dim_name]\n next_idx = mesh_indices[-1]\n slice_mesh_dims.append(mesh_indices)\n else:\n next_idx = device_mesh.mesh_dim_names.index(mesh_dim_name)\n slice_mesh_dims.append((next_idx,))\n if next_idx <= curr_idx:\n raise KeyError(f'Invalid mesh_dim_names {mesh_dim_names} specified. ', f'Found mesh dim indices to slice: {slice_mesh_dims}. ', 'Mesh dim indices should be in ascending order.')\n curr_idx = next_idx\n return slice_mesh_dims", + "docstring": "Validate whether the mesh_dim_names is valid for slicing the given device_mesh. 
If valid, return dim indexes of the slice mesh in the device mesh.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\device_mesh.py", + "ast_data": "FunctionDef name:_get_slice_mesh_dims arg:self arg:device_mesh arg:mesh_dim_names arguments arg arg arg If Compare Call Raise Call Call Assign Assign If Call Compare Raise Call Assign Assign For If Compare Assign Assign Call Assign Call Call If Compare Raise Call Assign Return return:yes" + }, + { + "library": "virtualenv", + "name": "close", + "source_code": "def close(self):\n pass", + "docstring": "Do nothing.", + "type": "method", + "file_path": "virtualenv\\src\\virtualenv\\app_data\\via_disk_folder.py", + "ast_data": "FunctionDef name:close arg:self arguments arg" + }, + { + "library": "scipy", + "name": "FitSolverError", + "source_code": "class FitSolverError(FitError):\n\n def __init__(self, mesg):\n emsg = 'Solver for the MLE equations failed to converge: '\n emsg += mesg.replace('\\n', '')\n self.args = (emsg,)", + "docstring": "Raised when a solver fails to converge while fitting a distribution.", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", + "ast_data": "ClassDef name:FitSolverError FunctionDef name:__init__ arg:self arg:mesg arguments arg arg Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "grad_fn", + "source_code": "def grad_fn(*args, **kwds):\n this_tape = tape.push_new_tape()\n try:\n end_node = f(*args, **kwds)\n if end_node is None:\n raise ValueError('Cannot differentiate a function that returns None; did you forget to return a value from {}?'.format(f.__name__))\n finally:\n tape.pop_tape(this_tape)\n variables = this_tape.watched_variables()\n if not variables:\n raise ValueError('No trainable variables were accessed while the function was being computed.')\n sources = [v.handle for v in variables]\n for s in sources:\n if getattr(s, 'is_packed', False):\n raise ValueError('GradientTape.gradient is not supported on packed EagerTensors yet.')\n grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node), sources)\n return (end_node, list(zip(grad, variables)))", + "docstring": "Computes the gradient of the wrapped function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py", + "ast_data": "FunctionDef name:grad_fn arguments arg arg Assign Call Try Assign Call If Compare Raise Call Call Call Assign Call If Raise Call Assign For If Call Raise Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "debug_set_max_size", + "source_code": "def debug_set_max_size(value: int) -> None:\n global _MAX_SIZE\n _MAX_SIZE = value", + "docstring": "Sets the max size allowed for each proto chunk (used for debugging only). Args: value: int byte size", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\constants.py", + "ast_data": "FunctionDef name:debug_set_max_size arg:value arguments arg Assign" + }, + { + "library": "pytorch", + "name": "_autowrap_check", + "source_code": "def _autowrap_check(patcher: _Patcher, frame_dict: dict[str, Any], function_ids: set[int]):\n if patcher.visit_once(frame_dict):\n for name, value in frame_dict.items():\n if not name.startswith('_') and callable(value) and (id(value) in function_ids):\n patcher.patch(frame_dict, name, _create_wrapped_func(value))", + "docstring": "Some methods, like are common enough we want to automatically wrap them as we see them. 
This method searches a scope for them and patches them if found.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py", + "ast_data": "FunctionDef name:_autowrap_check arg:patcher arg:frame_dict arg:function_ids arguments arg arg arg If Call For Call If BoolOp Call Call Compare Call Call Call" + }, + { + "library": "tensorflow", + "name": "function_executor_type", + "source_code": "@tf_export('experimental.function_executor_type')\n@tf_contextlib.contextmanager\ndef function_executor_type(executor_type):\n current_options = context().function_call_options\n old_options = copy.copy(current_options)\n try:\n current_options.executor_type = executor_type\n yield\n finally:\n context().function_call_options = old_options", + "docstring": "Context manager for setting the executor of eager defined functions. Eager defined functions are functions decorated by tf.contrib.eager.defun. Args: executor_type: a string for the name of the executor to be used to execute functions defined by tf.contrib.eager.defun. Yields: Context manager for setting the executor of eager defined functions.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:function_executor_type arg:executor_type arguments arg Assign Call Assign Call Try Assign Assign Call Call" + }, + { + "library": "cherrypy", + "name": "check_site_config_entries_in_app_config", + "source_code": "def check_site_config_entries_in_app_config(self):\n for sn, app in cherrypy.tree.apps.items():\n if not isinstance(app, cherrypy.Application):\n continue\n msg = []\n for section, entries in app.config.items():\n if section.startswith('/'):\n for key, value in entries.items():\n for n in ('engine.', 'server.', 'tree.', 'checker.'):\n if key.startswith(n):\n msg.append('[%s] %s = %s' % (section, key, value))\n if msg:\n msg.insert(0, 'The application mounted at %r contains the following config entries, which are only allowed in site-wide config. Move them to a [global] section and pass them to cherrypy.config.update() instead of tree.mount().' 
% sn)\n warnings.warn(os.linesep.join(msg))", + "docstring": "Check for mounted Applications that have site-scoped config.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpchecker.py", + "ast_data": "FunctionDef name:check_site_config_entries_in_app_config arg:self arguments arg For Call If Call Assign For Call If Call For Call For If Call Call If Call Call Call" + }, + { + "library": "matplotlib", + "name": "vertices", + "source_code": "@property\ndef vertices(self):\n self._revalidate_path()\n return self._cached_vertices", + "docstring": "Return the cached path after updating it if necessary.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\textpath.py", + "ast_data": "FunctionDef name:vertices arg:self arguments arg Call Return return:yes" + }, + { + "library": "pygame", + "name": "create_aliases", + "source_code": "def create_aliases():\n alias_groups = (('monospace', 'misc-fixed', 'courier', 'couriernew', 'console', 'fixed', 'mono', 'freemono', 'bitstreamverasansmono', 'verasansmono', 'monotype', 'lucidaconsole', 'consolas', 'dejavusansmono', 'liberationmono'), ('sans', 'arial', 'helvetica', 'swiss', 'freesans', 'bitstreamverasans', 'verasans', 'verdana', 'tahoma', 'calibri', 'gillsans', 'segoeui', 'trebuchetms', 'ubuntu', 'dejavusans', 'liberationsans'), ('serif', 'times', 'freeserif', 'bitstreamveraserif', 'roman', 'timesroman', 'timesnewroman', 'dutch', 'veraserif', 'georgia', 'cambria', 'constantia', 'dejavuserif', 'liberationserif'), ('wingdings', 'wingbats'), ('comicsansms', 'comicsans'))\n for alias_set in alias_groups:\n for name in alias_set:\n if name in Sysfonts:\n found = Sysfonts[name]\n break\n else:\n continue\n for name in alias_set:\n if name not in Sysfonts:\n Sysalias[name] = found", + "docstring": "Map common fonts that are absent from the system to similar fonts that are installed in the system", + "type": "function", + "file_path": "pygame\\src_py\\sysfont.py", + "ast_data": "FunctionDef name:create_aliases arguments Assign For For If Compare Assign For If Compare Assign" + }, + { + "library": "tensorflow", + "name": "_unshard_from_sc_to_cpu", + "source_code": "def _unshard_from_sc_to_cpu(stacked_table: tensor.Tensor, from_shard_layouts: Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]) -> Sequence[tensor.Tensor]:\n logging.vlog(1, 'To unshuffle_from_sc_to_cpu on stacked_table.shape: %s', stacked_table[0].shape)\n ret_tensors = []\n for layout in from_shard_layouts:\n padded_table = tpu_embedding_v3_utils.unshuffle_from_sc_to_cpu(stacked_table[0], num_sparse_cores=layout.num_sparse_cores, offset_in_shard=layout.sparse_core_shard_row_offset, size_in_shard=layout.unsharded_padded_shape[0] // layout.num_sparse_cores, shard_rotation=layout.sparse_core_shard_rotation)\n orig_table = tpu_embedding_v3_utils.remove_padding_from_sc(padded_table, layout.unsharded_shape)\n logging.vlog(1, 'orig_tensors.shape[%s]: %s', layout.table_name, orig_table.shape)\n ret_tensors.append(orig_table)\n return ret_tensors", + "docstring": "Undo the shard the feature tables into SparseCore stacked table. Args: stacked_table: The value of a SparseCore stacked and sharded table. from_shard_layouts: The target layouts for the target hardware. 
Returns: The unsharded feature tables.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py", + "ast_data": "FunctionDef name:_unshard_from_sc_to_cpu arg:stacked_table arg:from_shard_layouts arguments arg arg Call Assign For Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_close", + "source_code": "@abc.abstractmethod\ndef _close(self, death_sig: signal.Signals, timeout: int=30) -> None:\n raise NotImplementedError", + "docstring": "Terminates all processes managed by this context and cleans up any meta resources (e.g. redirect, error_file files).", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py", + "ast_data": "FunctionDef name:_close arg:self arg:death_sig arg:timeout arguments arg arg arg Raise" + }, + { + "library": "pytorch", + "name": "_get_ignored_buffer_names", + "source_code": "def _get_ignored_buffer_names(root_module: torch.nn.Module, ignored_modules: set[torch.nn.Module]) -> set[str]:\n all_ignored_buffer_names: set[str] = set()\n buffers_in_ignored_modules = {buffer for m in ignored_modules for buffer in m.buffers()}\n all_ignored_buffer_names.update({clean_tensor_name(buffer_name) for buffer_name, buffer in root_module.named_buffers() if buffer in buffers_in_ignored_modules})\n for submodule in root_module.modules():\n optional_fsdp_state = _get_module_fsdp_state(submodule)\n if optional_fsdp_state is not None:\n assert hasattr(optional_fsdp_state, '_ignored_buffer_names')\n all_ignored_buffer_names.update(optional_fsdp_state._ignored_buffer_names)\n return all_ignored_buffer_names", + "docstring": "Return the cleaned buffer FQNs in ``.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py", + "ast_data": "FunctionDef name:_get_ignored_buffer_names arg:root_module arg:ignored_modules arguments arg arg Call Assign Call Call Call Call Compare For Call Assign Call If Compare Call Call Return return:yes" + }, + { + "library": "django", + "name": "get_templatetag_libraries", + "source_code": "def get_templatetag_libraries(self, custom_libraries):\n libraries = get_installed_libraries()\n libraries.update(custom_libraries)\n return libraries", + "docstring": "Return a collation of template tag libraries from installed applications and the supplied custom_libraries argument.", + "type": "method", + "file_path": "django\\django\\template\\backends\\django.py", + "ast_data": "FunctionDef name:get_templatetag_libraries arg:self arg:custom_libraries arguments arg arg Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "__add__", + "source_code": "def __add__(self, rhs):\n if isinstance(rhs, str):\n t = super().__add__(rhs)\n if isinstance(rhs, SafeData):\n t = SafeString(t)\n return t\n return NotImplemented", + "docstring": "Concatenating a safe string with another safe bytestring or safe string is safe. 
Otherwise, the result is no longer safe.", + "type": "method", + "file_path": "django\\django\\utils\\safestring.py", + "ast_data": "FunctionDef name:__add__ arg:self arg:rhs arguments arg arg If Call Assign Call Call If Call Assign Call Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "hamming", + "source_code": "def hamming(u, v, w=None):\n u = _validate_vector(u)\n v = _validate_vector(v)\n if u.shape != v.shape:\n raise ValueError('The 1d arrays must have equal lengths.')\n u_ne_v = u != v\n if w is not None:\n w = _validate_weights(w)\n if w.shape != u.shape:\n raise ValueError(\"'w' should have the same length as 'u' and 'v'.\")\n w = w / w.sum()\n return np.dot(u_ne_v, w)\n return np.mean(u_ne_v)", + "docstring": "Compute the Hamming distance between two 1-D arrays. The Hamming distance between 1-D arrays and , is simply the proportion of disagreeing components in and . If and are boolean vectors, the Hamming distance is .. math:: \\frac{c_{01} + c_{10}}{n} where :math: is the number of occurrences of :math: and :math: for :math:`k >> from scipy.spatial import distance >>> distance.hamming([1, 0, 0], [0, 1, 0]) 0.66666666666666663 >>> distance.hamming([1, 0, 0], [1, 1, 0]) 0.33333333333333331 >>> distance.hamming([1, 0, 0], [2, 0, 0]) 0.33333333333333331 >>> distance.hamming([1, 0, 0], [3, 0, 0]) 0.33333333333333331", + "type": "function", + "file_path": "scipy\\scipy\\spatial\\distance.py", + "ast_data": "FunctionDef name:hamming arg:u arg:v arg:w arguments arg arg arg Assign Call Assign Call If Compare Raise Call Assign Compare If Compare Assign Call If Compare Raise Call Assign Call Return return:yes Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "savez_compressed", + "source_code": "@array_function_dispatch(_savez_compressed_dispatcher)\ndef savez_compressed(file, *args, allow_pickle=True, **kwds):\n _savez(file, args, kwds, True, allow_pickle=allow_pickle)", + "docstring": "Save several arrays into a single file in compressed `arr_0arr_1kwdsnumpy.lib.formatload~lib.npyio.NpzFile` attribute), and for the arrays themselves. 
Examples -------- >>> import numpy as np >>> test_array = np.random.rand(3, 2) >>> test_vector = np.random.rand(4) >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector) >>> loaded = np.load('/tmp/123.npz') >>> print(np.array_equal(test_array, loaded['a'])) True >>> print(np.array_equal(test_vector, loaded['b'])) True", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_npyio_impl.py", + "ast_data": "FunctionDef name:savez_compressed arg:file arguments arg arg arg arg Call Call" + }, + { + "library": "kornia", + "name": "weight_init", + "source_code": "def weight_init(m: nn.Module) -> None:\n if isinstance(m, (nn.Conv2d,)):\n torch.nn.init.xavier_normal_(m.weight, gain=1.0)\n if m.weight.data.shape[1] == torch.Size([1]):\n torch.nn.init.normal_(m.weight, mean=0.0)\n if m.bias is not None:\n torch.nn.init.zeros_(m.bias)\n if isinstance(m, (nn.ConvTranspose2d,)):\n torch.nn.init.xavier_normal_(m.weight, gain=1.0)\n if m.weight.data.shape[1] == torch.Size([1]):\n torch.nn.init.normal_(m.weight, std=0.1)\n if m.bias is not None:\n torch.nn.init.zeros_(m.bias)", + "docstring": "Initialize weights.", + "type": "function", + "file_path": "kornia\\kornia\\filters\\dexined.py", + "ast_data": "FunctionDef name:weight_init arg:m arguments arg If Call Call If Compare Call Call If Compare Call If Call Call If Compare Call Call If Compare Call" + }, + { + "library": "django", + "name": "NumericPasswordValidator", + "source_code": "class NumericPasswordValidator:\n\n def validate(self, password, user=None):\n if password.isdigit():\n raise ValidationError(self.get_error_message(), code='password_entirely_numeric')\n\n def get_error_message(self):\n return _('This password is entirely numeric.')\n\n def get_help_text(self):\n return _('Your password can’t be entirely numeric.')", + "docstring": "Validate that the password is not entirely numeric.", + "type": "class", + "file_path": "django\\django\\contrib\\auth\\password_validation.py", + "ast_data": "ClassDef name:NumericPasswordValidator FunctionDef name:validate arg:self arg:password arg:user arguments arg arg arg If Call Raise Call Call FunctionDef name:get_error_message arg:self arguments arg Return return:yes Call FunctionDef name:get_help_text arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "db_default_sql", + "source_code": "def db_default_sql(self, field):\n from django.db.models.expressions import Value\n db_default = field._db_default_expression\n sql = self._column_default_sql(field) if isinstance(db_default, Value) else '(%s)'\n query = Query(model=field.model)\n compiler = query.get_compiler(connection=self.connection)\n default_sql, params = compiler.compile(db_default)\n if self.connection.features.requires_literal_defaults:\n default_sql %= tuple((self.prepare_default(p) for p in params))\n params = []\n return (sql % default_sql, params)", + "docstring": "Return the sql and params for the field's database default.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\schema.py", + "ast_data": "FunctionDef name:db_default_sql arg:self arg:field arguments arg arg Assign Assign Call Call Assign Call Assign Call Assign Call If Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "HierarchicalCopyAllReduce", + "source_code": "@tf_export('distribute.HierarchicalCopyAllReduce')\nclass HierarchicalCopyAllReduce(AllReduceCrossDeviceOps):\n\n def __init__(self, num_packs=1):\n if num_packs < 0:\n raise ValueError('HierarchicalCopy requires 
num_packs >= 0, but {} is specified'.format(num_packs))\n super(HierarchicalCopyAllReduce, self).__init__(all_reduce_alg='hierarchical_copy', num_packs=num_packs)", + "docstring": "Hierarchical copy all-reduce implementation of CrossDeviceOps. It reduces to one GPU along edges in some hierarchy and broadcasts back to each GPU along the same path. For the batch API, tensors will be repacked or aggregated for more efficient cross-device transportation. This is a reduction created for Nvidia DGX-1 which assumes GPUs connects like that on DGX-1 machine. If you have different GPU inter-connections, it is likely that it would be slower than . For reduces that are not all-reduce, it falls back to . Here is how you can use in :", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py", + "ast_data": "ClassDef name:HierarchicalCopyAllReduce FunctionDef name:__init__ arg:self arg:num_packs arguments arg arg If Compare Raise Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "is_storage", + "source_code": "def is_storage(obj: _Any, /) -> _TypeIs[_Union['TypedStorage', 'UntypedStorage']]:\n return type(obj) in _storage_classes", + "docstring": "Returns True if is a PyTorch storage object. Args: obj (Object): Object to test", + "type": "function", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:is_storage arguments arg Return return:yes Compare Call" + }, + { + "library": "pytorch", + "name": "replace_all_batch_norm_modules_", + "source_code": "@exposed_in('torch.func')\ndef replace_all_batch_norm_modules_(root: nn.Module) -> nn.Module:\n batch_norm_without_running_stats(root)\n for obj in root.modules():\n batch_norm_without_running_stats(obj)\n return root", + "docstring": "In place updates :attr: by setting the `root`", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\batch_norm_replacement.py", + "ast_data": "FunctionDef name:replace_all_batch_norm_modules_ arg:root arguments arg Call For Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "_set_list", + "source_code": "def _set_list(self, length, items):\n prev_ptr = self.ptr\n srid = self.srid\n self.ptr = self._create_collection(length, items)\n if srid:\n self.srid = srid\n capi.destroy_geom(prev_ptr)", + "docstring": "Create a new collection, and destroy the contents of the previous pointer.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\collections.py", + "ast_data": "FunctionDef name:_set_list arg:self arg:length arg:items arguments arg arg arg Assign Assign Assign Call If Assign Call" + }, + { + "library": "django", + "name": "to_python", + "source_code": "def to_python(self, value):\n return value", + "docstring": "Convert the input value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can't be converted. Return the converted value. 
Subclasses should override this.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "FunctionDef name:to_python arg:self arg:value arguments arg arg Return return:yes" + }, + { + "library": "scrapy", + "name": "short_desc", + "source_code": "def short_desc(self) -> str:\n return ''", + "docstring": "A short description of the command", + "type": "method", + "file_path": "scrapy\\scrapy\\commands\\__init__.py", + "ast_data": "FunctionDef name:short_desc arg:self arguments arg Return return:yes" + }, + { + "library": "seaborn", + "name": "_add_axis_labels", + "source_code": "def _add_axis_labels(self):\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)", + "docstring": "Add labels to the left and bottom Axes.", + "type": "method", + "file_path": "seaborn\\seaborn\\axisgrid.py", + "ast_data": "FunctionDef name:_add_axis_labels arg:self arguments arg For Call Call For Call Call" + }, + { + "library": "tensorflow", + "name": "_block_orth", + "source_code": "def _block_orth(self, projection_matrix):\n n = projection_matrix.shape.as_list()[0]\n kernel = {}\n eye = linalg_ops_impl.eye(n, dtype=self.dtype)\n kernel[0] = projection_matrix\n kernel[1] = eye - projection_matrix\n return kernel", + "docstring": "Construct a kernel. Used to construct orthgonal kernel. Args: projection_matrix: A symmetric projection matrix of size n x n. Returns: [projection_matrix, (1 - projection_matrix)].", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "FunctionDef name:_block_orth arg:self arg:projection_matrix arguments arg arg Assign Call Assign Assign Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_with_inner_rank", + "source_code": "def _with_inner_rank(self, inner_rank):\n rank = self.rank\n if rank is None:\n raise ValueError('Rank must be known to adjust inner_rank')\n elif rank < 2:\n if inner_rank == rank:\n return self\n raise ValueError('Cannot change inner_rank if rank < 2')\n else:\n new_num_row_partitions = rank - inner_rank\n return self._with_num_row_partitions(new_num_row_partitions)", + "docstring": "Returns the same shape but a different inner_rank. All dimensions that are to be represented in the inner_shape must be dense. See inner_rank. Args: inner_rank: the new inner_rank of the shape. 
Returns: the same shape but a different inner_rank Raises: ValueError if the new dense rank is invalid, or the old rank is unknown.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:_with_inner_rank arg:self arg:inner_rank arguments arg arg Assign If Compare Raise Call If Compare If Compare Return return:yes Raise Call Assign Return return:yes Call" + }, + { + "library": "sphinx", + "name": "load_builtin_themes", + "source_code": "def load_builtin_themes(self, config: Config) -> None:\n self.themes['manual'] = BuiltInTheme('manual', config)\n self.themes['howto'] = BuiltInTheme('howto', config)", + "docstring": "Load built-in themes.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\latex\\theming.py", + "ast_data": "FunctionDef name:load_builtin_themes arg:self arg:config arguments arg arg Assign Call Assign Call" + }, + { + "library": "scipy", + "name": "estimate_error", + "source_code": "def estimate_error(self, f, a, b, args=()):\n est = self.estimate(f, a, b, args)\n refined_est = 0\n for a_k, b_k in _split_subregion(a, b):\n refined_est += self.estimate(f, a_k, b_k, args)\n return self.xp.abs(est - refined_est)", + "docstring": "Estimate the error of the approximation for the integral of in rectangular region described by corners and . If a subclass does not override this method, then a default error estimator is used. This estimates the error as `ffxestimateffest`.", + "type": "method", + "file_path": "scipy\\scipy\\integrate\\_rules\\_base.py", + "ast_data": "FunctionDef name:estimate_error arg:self arg:f arg:a arg:b arg:args arguments arg arg arg arg arg Assign Call Assign For Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "categorical_crossentropy", + "source_code": "@dispatch.add_dispatch_support\ndef categorical_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0, axis=-1):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n label_smoothing = tensor_conversion.convert_to_tensor_v2_with_dispatch(label_smoothing, dtype=backend.floatx())\n\n def _smooth_labels():\n num_classes = math_ops.cast(array_ops.shape(y_true)[-1], y_pred.dtype)\n return y_true * (1.0 - label_smoothing) + label_smoothing / num_classes\n y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels, lambda: y_true)\n return backend.categorical_crossentropy(y_true, y_pred, from_logits=from_logits, axis=axis)", + "docstring": "Computes the categorical crossentropy loss. Standalone usage: >>> y_true = [[0, 1, 0], [0, 0, 1]] >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]] >>> loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss.numpy() array([0.0513, 2.303], dtype=float32) Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. from_logits: Whether is expected to be a logits tensor. By default, we assume that encodes a probability distribution. label_smoothing: Float in [0, 1]. If > then smooth the labels. For example, if , use for non-target labels and for target labels. axis: Defaults to -1. The dimension along which the entropy is computed. 
Returns: Categorical crossentropy loss value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:categorical_crossentropy arg:y_true arg:y_pred arg:from_logits arg:label_smoothing arg:axis arguments arg arg arg arg arg Assign Call Assign Call Assign Call Call FunctionDef name:_smooth_labels arguments Assign Call Call Return return:yes Assign Call arguments Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_linestyle", + "source_code": "def set_linestyle(self, ls):\n self._shared_setter('linestyle', ls)", + "docstring": "Set the linestyle of the rectangle and the connectors. ======================================================= ================ linestyle description ======================================================= ================ `` is an even length tuple of on and off ink in points. Parameters ---------- ls : {'-', '--', '-.', ':', '', (offset, on-off-seq), ...} The line style.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\inset.py", + "ast_data": "FunctionDef name:set_linestyle arg:self arg:ls arguments arg arg Call" + }, + { + "library": "django", + "name": "intersects", + "source_code": "def intersects(self, other):\n return self._topology(capi.ogr_intersects, other)", + "docstring": "Return True if this geometry intersects with the other.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:intersects arg:self arg:other arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "Initializer", + "source_code": "class Initializer:\n\n def __call__(self, shape, dtype=None, **kwargs):\n raise NotImplementedError\n\n def get_config(self):\n return {}\n\n @classmethod\n def from_config(cls, config):\n config.pop('dtype', None)\n return cls(**config)\n\n def _validate_kwargs(self, kwargs, support_partition=True):\n for kwarg in kwargs:\n if kwarg not in [_PARTITION_SHAPE, _PARTITION_OFFSET]:\n raise TypeError(f'Keyword argument should be one of {list([_PARTITION_SHAPE, _PARTITION_OFFSET])}. Received: {kwarg}')\n elif not support_partition:\n raise ValueError(f\"{self.__class__.__name__} initializer doesn't support partition-related arguments\")", + "docstring": "Initializer base class: all initializers inherit from this class. 
Initializers should implement a method with the following signature:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py", + "ast_data": "ClassDef name:Initializer FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Raise FunctionDef name:get_config arg:self arguments arg Return return:no FunctionDef name:from_config arg:cls arg:config arguments arg arg Call Return return:yes Call FunctionDef name:_validate_kwargs arg:self arg:kwargs arg:support_partition arguments arg arg arg For If Compare Raise Call Call If Raise Call" + }, + { + "library": "cryptography", + "name": "__eq__", + "source_code": "@abc.abstractmethod\ndef __eq__(self, other: object) -> bool:\n pass", + "docstring": "Checks equality.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py", + "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg" + }, + { + "library": "tensorflow", + "name": "_prep_cli_for_run_start", + "source_code": "def _prep_cli_for_run_start(self):\n self._run_cli = ui_factory.get_ui(self._ui_type, config=self._config)\n help_intro = debugger_cli_common.RichTextLines([])\n if self._run_call_count == 1:\n help_intro.extend(cli_shared.get_tfdbg_logo())\n help_intro.extend(debugger_cli_common.get_tensorflow_version_lines())\n help_intro.extend(debugger_cli_common.RichTextLines('Upcoming run:'))\n help_intro.extend(self._run_info)\n self._run_cli.set_help_intro(help_intro)\n self._title = 'run-start: ' + self._run_description\n self._init_command = 'run_info'\n self._title_color = 'blue_on_white'", + "docstring": "Prepare (but not launch) the CLI for run-start.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\local_cli_wrapper.py", + "ast_data": "FunctionDef name:_prep_cli_for_run_start arg:self arguments arg Assign Call Assign Call If Compare Call Call Call Call Call Call Call Call Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "_compute_static_batch_dim", + "source_code": "def _compute_static_batch_dim(self):\n new_batch_dim = tensor_util.constant_value(self._batch_sizes)\n if new_batch_dim is None:\n return None\n if isinstance(new_batch_dim, np.ndarray):\n if len(new_batch_dim.shape) == 1:\n if np.all(new_batch_dim == new_batch_dim[0]):\n new_batch_dim = new_batch_dim[0]\n else:\n return None\n elif len(new_batch_dim.shape) > 1:\n raise ValueError(f'Invalid `batch_sizes`. Expected `batch_sizes` to be a scalar or a vector. Received `batch_sizes` of rank {len(new_batch_dim.shape)}.')\n if self._may_form_partial_batches(new_batch_dim):\n return None\n return new_batch_dim", + "docstring": "Computes the static batch dimension of a dataset if it can be determined. Given the RebatchDataset parameters, determines the batch dimension of this dataset statically. Returns None if this cannot be determined or is variable. Returns: An integer representing the batch dimension of the dataset. If it cannot be determined statically, returns None. 
Raises: ValueError: The batch_sizes parameter is malformed, input_dataset is not batched, or input_dataset batch sizes are incompatible with each other.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\rebatch_op.py", + "ast_data": "FunctionDef name:_compute_static_batch_dim arg:self arguments arg Assign Call If Compare Return return:no If Call If Compare Call If Call Compare Assign Return return:no If Compare Call Raise Call Call If Call Return return:no Return return:yes" + }, + { + "library": "tensorflow", + "name": "zeros_cache", + "source_code": "def zeros_cache(self):\n return _tensor_caches_map[self._id].zeros_cache", + "docstring": "Per-device cache for scalars.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:zeros_cache arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "handle", + "source_code": "@property\ndef handle(self):\n return self._handle", + "docstring": "For compatibility; handles are not meaningful when eager is enabled.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:handle arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "weight_init", + "source_code": "def weight_init(model: Module) -> None:\n torch.nn.init.orthogonal_(model.conv1.weight, torch.nn.init.calculate_gain('relu'))\n torch.nn.init.orthogonal_(model.conv2.weight, torch.nn.init.calculate_gain('relu'))\n torch.nn.init.orthogonal_(model.conv3.weight, torch.nn.init.calculate_gain('relu'))\n torch.nn.init.orthogonal_(model.conv4.weight)", + "docstring": "Initialize model weights.", + "type": "function", + "file_path": "kornia\\kornia\\models\\super_resolution\\small_sr.py", + "ast_data": "FunctionDef name:weight_init arg:model arguments arg Call Call Call Call Call Call Call" + }, + { + "library": "numpy", + "name": "hermeadd", + "source_code": "def hermeadd(c1, c2):\n return pu._add(c1, c2)", + "docstring": "Add one Hermite series to another. Returns the sum of two Hermite series + . The arguments are sequences of coefficients ordered from lowest order term to highest, i.e., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the Hermite series of their sum. See Also -------- hermesub, hermemulx, hermemul, hermediv, hermepow Notes ----- Unlike multiplication, division, etc., the sum of two Hermite series is a Hermite series (without having to \"reproject\" the result onto the basis set) so addition, just like that of \"standard\" polynomials, is simply \"component-wise.\" Examples -------- >>> from numpy.polynomial.hermite_e import hermeadd >>> hermeadd([1, 2, 3], [1, 2, 3, 4]) array([2., 4., 6., 4.])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", + "ast_data": "FunctionDef name:hermeadd arg:c1 arg:c2 arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_draggable", + "source_code": "def set_draggable(self, state, use_blit=False, update='loc'):\n if state:\n if self._draggable is None:\n self._draggable = DraggableLegend(self, use_blit, update=update)\n else:\n if self._draggable is not None:\n self._draggable.disconnect()\n self._draggable = None\n return self._draggable", + "docstring": "Enable or disable mouse dragging support of the legend. 
Parameters ---------- state : bool Whether mouse dragging is enabled. use_blit : bool, optional Use blitting for faster image composition. For details see :ref:. update : {'loc', 'bbox'}, optional The legend parameter to be changed when dragged: - 'loc': update the *loc* parameter of the legend - 'bbox': update the *bbox_to_anchor* parameter of the legend Returns ------- or *None* If *state* is `.DraggableLegend` helper instance. Otherwise this returns *None*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\legend.py", + "ast_data": "FunctionDef name:set_draggable arg:self arg:state arg:use_blit arg:update arguments arg arg arg arg If If Compare Assign Call If Compare Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "_select_by_peak_threshold", + "source_code": "def _select_by_peak_threshold(x, peaks, tmin, tmax):\n stacked_thresholds = np.vstack([x[peaks] - x[peaks - 1], x[peaks] - x[peaks + 1]])\n keep = np.ones(peaks.size, dtype=bool)\n if tmin is not None:\n min_thresholds = np.min(stacked_thresholds, axis=0)\n keep &= tmin <= min_thresholds\n if tmax is not None:\n max_thresholds = np.max(stacked_thresholds, axis=0)\n keep &= max_thresholds <= tmax\n return (keep, stacked_thresholds[0], stacked_thresholds[1])", + "docstring": "Evaluate which peaks fulfill the threshold condition. Parameters ---------- x : ndarray A 1-D array which is indexable by . peaks : ndarray Indices of peaks in . tmin, tmax : scalar or ndarray or None Minimal and / or maximal required thresholds. If supplied as ndarrays their size must match . `peakspeak` containing the thresholds of each peak on both sides. Notes ----- .. versionadded:: 1.1.0", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_peak_finding.py", + "ast_data": "FunctionDef name:_select_by_peak_threshold arg:x arg:peaks arg:tmin arg:tmax arguments arg arg arg arg Assign Call Assign Call If Compare Assign Call Compare If Compare Assign Call Compare Return return:yes" + }, + { + "library": "kornia", + "name": "unproject_points", + "source_code": "def unproject_points(point_2d: torch.Tensor, depth: torch.Tensor, camera_matrix: torch.Tensor, normalize: bool=False) -> torch.Tensor:\n if not isinstance(depth, torch.Tensor):\n raise TypeError(f'Input depth type is not a torch.Tensor. Got {type(depth)}')\n if not depth.shape[-1] == 1:\n raise ValueError(f'Input depth must be in the shape of (*, 1). Got {depth.shape}')\n xy: torch.Tensor = normalize_points_with_intrinsics(point_2d, camera_matrix)\n xyz: torch.Tensor = convert_points_to_homogeneous(xy)\n if normalize:\n xyz = F.normalize(xyz, dim=-1, p=2.0)\n return xyz * depth", + "docstring": "Unproject a 2d point in 3d. Transform coordinates in the pixel frame to the camera frame. Args: point_2d: tensor containing the 2d to be projected to world coordinates. The shape of the tensor can be :math:. depth: tensor containing the depth value of each 2d points. The tensor shape must be equal to point2d :math:. camera_matrix: tensor containing the intrinsics camera matrix. The tensor shape must be :math:. normalize: whether to normalize the pointcloud. This must be set to when the depth is represented as the Euclidean ray length from the camera position. Returns: tensor of (x, y, z) world coordinates with shape :math:. 
Example: >>> _ = torch.manual_seed(0) >>> x = torch.rand(1, 2) >>> depth = torch.ones(1, 1) >>> K = torch.eye(3)[None] >>> unproject_points(x, depth, K) tensor([[0.4963, 0.7682, 1.0000]])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\camera\\perspective.py", + "ast_data": "FunctionDef name:unproject_points arg:point_2d arg:depth arg:camera_matrix arg:normalize arguments arg arg arg arg If Call Raise Call Call If Compare Raise Call Call Call If Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "to_values", + "source_code": "def to_values(self):\n mtx = self.get_matrix()\n return tuple(mtx[:2].swapaxes(0, 1).flat)", + "docstring": "Return the values of the matrix as an `` tuple.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:to_values arg:self arguments arg Assign Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "normalize_laf", + "source_code": "def normalize_laf(LAF: Tensor, images: Tensor) -> Tensor:\n KORNIA_CHECK_LAF(LAF)\n _, _, h, w = images.size()\n wf = float(w - 1)\n hf = float(h - 1)\n min_size = min(hf, wf)\n coef = torch.ones(1, 1, 2, 3, dtype=LAF.dtype, device=LAF.device) / min_size\n coef[0, 0, 0, 2] = 1.0 / wf\n coef[0, 0, 1, 2] = 1.0 / hf\n return coef.expand_as(LAF) * LAF", + "docstring": "Normalize LAFs to [0,1] scale from pixel scale. See below: B,N,H,W = images.size() MIN_SIZE = min(H - 1, W -1) [a11 a21 x] [a21 a22 y] becomes: [a11/MIN_SIZE a21/MIN_SIZE x/(W-1)] [a21/MIN_SIZE a22/MIN_SIZE y/(H-1)] Args: LAF: :math: images: :math: Returns: the denormalized LAF: :math:, scale in image percentage (0, 1)", + "type": "function", + "file_path": "kornia\\kornia\\feature\\laf.py", + "ast_data": "FunctionDef name:normalize_laf arg:LAF arg:images arguments arg arg Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_oom_event", + "source_code": "def _oom_event(self, symptoms):\n if not symptoms:\n return False\n for symptom in reversed(symptoms):\n if symptom['symptomType'] != 'OUT_OF_MEMORY':\n continue\n oom_datetime_str = symptom['createTime'].split('.')[0]\n oom_datetime = datetime.datetime.strptime(oom_datetime_str, '%Y-%m-%dT%H:%M:%S')\n time_diff = _utcnow() - oom_datetime\n if time_diff < datetime.timedelta(seconds=_OOM_EVENT_COOL_TIME_SEC):\n logging.warning(self._symptom_msg('a recent runtime OOM has occurred ~{} seconds ago. The model script will terminate automatically. To prevent future OOM events, please consider reducing the model size. 
To disable this behavior, set flag --runtime_oom_exit=false when starting the script.'.format(time_diff.seconds)))\n return True\n return False", + "docstring": "Check if a runtime OOM event is reported.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py", + "ast_data": "FunctionDef name:_oom_event arg:self arg:symptoms arguments arg arg If Return return:yes For Call If Compare Assign Call Assign Call Assign Call If Compare Call Call Call Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "_findoffset", + "source_code": "def _findoffset(self, width, height, xdescent, ydescent, renderer):\n if self._loc == 0:\n x, y = self._find_best_position(width, height, renderer)\n elif self._loc in Legend.codes.values():\n bbox = Bbox.from_bounds(0, 0, width, height)\n x, y = self._get_anchored_bbox(self._loc, bbox, self.get_bbox_to_anchor(), renderer)\n else:\n fx, fy = self._loc\n bbox = self.get_bbox_to_anchor()\n x, y = (bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy)\n return (x + xdescent, y + ydescent)", + "docstring": "Helper function to locate the legend.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\legend.py", + "ast_data": "FunctionDef name:_findoffset arg:self arg:width arg:height arg:xdescent arg:ydescent arg:renderer arguments arg arg arg arg arg arg If Compare Assign Call If Compare Call Assign Call Assign Call Call Assign Assign Call Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "wait", + "source_code": "def wait(self, key, timeout=5, debug=False):\n value = self.get(key)\n if isinstance(value, threading.Event):\n if timeout is None:\n if debug:\n cherrypy.log('No timeout', 'TOOLS.CACHING')\n return None\n if debug:\n cherrypy.log('Waiting up to %s seconds' % timeout, 'TOOLS.CACHING')\n value.wait(timeout)\n if value.result is not None:\n if debug:\n cherrypy.log('Result!', 'TOOLS.CACHING')\n return value.result\n if debug:\n cherrypy.log('Timed out', 'TOOLS.CACHING')\n e = threading.Event()\n e.result = None\n dict.__setitem__(self, key, e)\n return None\n elif value is None:\n if debug:\n cherrypy.log('Timed out', 'TOOLS.CACHING')\n e = threading.Event()\n e.result = None\n dict.__setitem__(self, key, e)\n return value", + "docstring": "Return the cached value for the given key, or None. If timeout is not None, and the value is already being calculated by another thread, wait until the given timeout has elapsed. If the value is available before the timeout expires, it is returned. If not, None is returned, and a sentinel placed in the cache to signal other threads to wait. 
If timeout is None, no waiting is performed nor sentinels used.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\caching.py", + "ast_data": "FunctionDef name:wait arg:self arg:key arg:timeout arg:debug arguments arg arg arg arg Assign Call If Call If Compare If Call Return return:no If Call Call If Compare If Call Return return:yes If Call Assign Call Assign Call Return return:no If Compare If Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "reduce_max", + "source_code": "@dispatch.dispatch_for_api(math_ops.reduce_max)\ndef reduce_max(input_tensor: ragged_tensor.Ragged, axis=None, keepdims=None, name=None):\n return ragged_reduce_aggregate(reduce_op=math_ops.reduce_max, unsorted_segment_op=math_ops.unsorted_segment_max, rt_input=input_tensor, axis=axis, keepdims=keepdims, name=name or 'RaggedReduceMax')", + "docstring": "For docs, see: _RAGGED_REDUCE_DOCSTRING.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py", + "ast_data": "FunctionDef name:reduce_max arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg Return return:yes Call BoolOp Call" + }, + { + "library": "kornia", + "name": "__init__", + "source_code": "def __init__(self, image_size: ImageSize, model_type: CameraModelType, params: Tensor) -> None:\n self._model = get_model_from_type(model_type, image_size, params)", + "docstring": "Construct CameraModel class. Args: image_size: Image size model_type: Camera model type params: Camera parameters of shape :math:.", + "type": "method", + "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:image_size arg:model_type arg:params arguments arg arg arg arg Assign Call" + }, + { + "library": "seaborn", + "name": "plot", + "source_code": "def plot(self, pyplot: bool=False) -> Plotter:\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)", + "docstring": "Compile the plot spec and return the Plotter object.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\plot.py", + "ast_data": "FunctionDef name:plot arg:self arg:pyplot arguments arg arg With Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "get_slice_bound", + "source_code": "def get_slice_bound(self, label: Hashable | Sequence[Hashable], side: Literal['left', 'right']) -> int:\n if not isinstance(label, tuple):\n label = (label,)\n return self._partial_tup_index(label, side=side)", + "docstring": "For an ordered MultiIndex, compute slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if `side=='right') position of given label. Parameters ---------- label : object or tuple of objects side : {'left', 'right'} Returns ------- int Index of label. Notes ----- This method only works if level 0 index of the MultiIndex is lexsorted. Examples -------- >>> mi = pd.MultiIndex.from_arrays([list(\"abbc\"), list(\"gefd\")]) Get the locations from the leftmost 'b' in the first level until the end of the multiindex: >>> mi.get_slice_bound(\"b\", side=\"left\") 1 Like above, but if you get the locations from the rightmost 'b' in the first level and 'f' in the second level: >>> mi.get_slice_bound((\"b\", \"f\"), side=\"right\") 3 See Also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. 
MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:get_slice_bound arg:self arg:label arg:side arguments arg arg arg If Call Assign Return return:yes Call" + }, + { + "library": "django", + "name": "get", + "source_code": "def get(self, *args, **kwargs):\n if self.query.combinator and (args or kwargs):\n raise NotSupportedError('Calling QuerySet.get(...) with filters after %s() is not supported.' % self.query.combinator)\n clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs)\n if self.query.can_filter() and (not self.query.distinct_fields):\n clone = clone.order_by()\n limit = None\n if not clone.query.select_for_update or connections[clone.db].features.supports_select_for_update_with_limit:\n limit = MAX_GET_RESULTS\n clone.query.set_limits(high=limit)\n num = len(clone)\n if num == 1:\n return clone._result_cache[0]\n if not num:\n raise self.model.DoesNotExist('%s matching query does not exist.' % self.model._meta.object_name)\n raise self.model.MultipleObjectsReturned('get() returned more than one %s -- it returned %s!' % (self.model._meta.object_name, num if not limit or num < limit else 'more than %s' % (limit - 1)))", + "docstring": "Perform the query and return a single object matching the given keyword arguments.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:get arg:self arguments arg arg arg If BoolOp BoolOp Raise Call Assign Call Call If BoolOp Call Assign Call Assign If BoolOp Assign Call Assign Call If Compare Return return:yes If Raise Call Raise Call BoolOp Compare" + }, + { + "library": "tensorflow", + "name": "get_function_def", + "source_code": "def get_function_def(self, name):\n if is_oss:\n with c_api_util.tf_buffer() as buffer_:\n pywrap_tfe.TFE_ContextGetFunctionDef(self._handle, name, buffer_)\n proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)\n function_def = function_pb2.FunctionDef()\n function_def.ParseFromString(proto_data)\n else:\n function_def = pywrap_tfe.TFE_ContextGetFunctionDefNoSerialization(self._handle, name)\n return function_def", + "docstring": "Get a function definition from the context. Args: name: function signature name. Returns: The requested FunctionDef. 
Raises: tf.errors.NotFoundError: if name is not the name of a registered function.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:get_function_def arg:self arg:name arguments arg arg If With Call Call Assign Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "transformed", + "source_code": "def transformed(self, transform):\n pts = self.get_points()\n ll, ul, lr = transform.transform(np.array([pts[0], [pts[0, 0], pts[1, 1]], [pts[1, 0], pts[0, 1]]]))\n return Bbox([ll, [lr[0], ul[1]]])", + "docstring": "Construct a by statically transforming this one by *transform*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:transformed arg:self arg:transform arguments arg arg Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, path, options=None):\n if not isinstance(options, TFRecordOptions):\n options = TFRecordOptions(compression_type=options)\n super(TFRecordWriter, self).__init__(compat.as_bytes(path), options._as_record_writer_options())", + "docstring": "Opens file and creates a writing to it. Args: path: The path to the TFRecords file. options: (optional) String specifying compression type, , or object. Raises: IOError: If cannot be opened for writing. ValueError: If valid compression_type can't be determined from .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\tf_record.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:path arg:options arguments arg arg arg If Call Assign Call Call Call Call Call" + }, + { + "library": "django", + "name": "_assign_extended_slice", + "source_code": "def _assign_extended_slice(self, start, stop, step, valueList):\n indexList = range(start, stop, step)\n if len(valueList) != len(indexList):\n raise ValueError('attempt to assign sequence of size %d to extended slice of size %d' % (len(valueList), len(indexList)))\n for i, val in zip(indexList, valueList):\n self._set_single(i, val)", + "docstring": "Assign an extended slice by re-assigning individual items", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py", + "ast_data": "FunctionDef name:_assign_extended_slice arg:self arg:start arg:stop arg:step arg:valueList arguments arg arg arg arg arg Assign Call If Compare Call Call Raise Call Call Call For Call Call" + }, + { + "library": "numpy", + "name": "has_samedomain", + "source_code": "def has_samedomain(self, other):\n return np.all(self.domain == other.domain)", + "docstring": "Check if domains match. Parameters ---------- other : class instance The other class must have the `` attribute. 
Returns ------- bool : boolean True if the domains are the same, False otherwise.", + "type": "method", + "file_path": "numpy\\numpy\\polynomial\\_polybase.py", + "ast_data": "FunctionDef name:has_samedomain arg:self arg:other arguments arg arg Return return:yes Call Compare" + }, + { + "library": "scipy", + "name": "isorth", + "source_code": "def isorth(A, tol=None):\n if DEBUGGING:\n if present(tol):\n assert tol >= 0\n num_vars = np.size(A, 1)\n if num_vars > np.size(A, 0):\n is_orth = False\n elif np.isnan(primasum(abs(A))):\n is_orth = False\n elif present(tol):\n is_orth = (abs(matprod(A.T, A) - np.eye(num_vars)) <= np.maximum(tol, tol * np.max(abs(A)))).all()\n else:\n is_orth = (abs(matprod(A.T, A) - np.eye(num_vars)) <= 0).all()\n return is_orth", + "docstring": "This function tests whether the matrix A has orthonormal columns up to the tolerance TOL.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\linalg.py", + "ast_data": "FunctionDef name:isorth arg:A arg:tol arguments arg arg If If Call Compare Assign Call If Compare Call Assign If Call Call Call Assign If Call Assign Call Compare Call Call Call Call Call Call Assign Call Compare Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_objective", + "source_code": "def _objective(mle, precision_, alpha):\n p = precision_.shape[0]\n cost = -2.0 * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)\n cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())\n return cost", + "docstring": "Evaluation of the graphical-lasso objective function the objective function is made of a shifted scaled version of the normalized log-likelihood (i.e. its empirical mean over the samples) and a penalisation term to promote sparsity", + "type": "function", + "file_path": "scikit-learn\\sklearn\\covariance\\_graph_lasso.py", + "ast_data": "FunctionDef name:_objective arg:mle arg:precision_ arg:alpha arguments arg arg arg Assign Assign Call Call Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "build_signature_def", + "source_code": "@tf_export(v1=['saved_model.build_signature_def', 'saved_model.signature_def_utils.build_signature_def'])\n@deprecation.deprecated_endpoints('saved_model.signature_def_utils.build_signature_def')\ndef build_signature_def(inputs=None, outputs=None, method_name=None, defaults=None):\n signature_def = meta_graph_pb2.SignatureDef()\n if inputs is not None:\n for item in inputs:\n signature_def.inputs[item].CopyFrom(inputs[item])\n if outputs is not None:\n for item in outputs:\n signature_def.outputs[item].CopyFrom(outputs[item])\n if method_name is not None:\n signature_def.method_name = method_name\n if defaults is not None:\n for arg_name, default in defaults.items():\n if isinstance(default, ops.EagerTensor):\n signature_def.defaults[arg_name].CopyFrom(tensor_util.make_tensor_proto(default.numpy()))\n elif default.op.type == 'Const':\n signature_def.defaults[arg_name].CopyFrom(default.op.get_attr('value'))\n else:\n raise ValueError(f'Unable to convert object {str(default)} of type {type(default)} to TensorProto.')\n return signature_def", + "docstring": "Utility function to build a SignatureDef protocol buffer. Args: inputs: Inputs of the SignatureDef defined as a proto map of string to tensor info. outputs: Outputs of the SignatureDef defined as a proto map of string to tensor info. method_name: Method name of the SignatureDef as a string. 
defaults: Defaults of the SignatureDef defined as a proto map of string to TensorProto. Returns: A SignatureDef protocol buffer constructed based on the supplied arguments.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_def_utils_impl.py", + "ast_data": "FunctionDef name:build_signature_def arg:inputs arg:outputs arg:method_name arg:defaults arguments arg arg arg arg Assign Call If Compare For Call If Compare For Call If Compare Assign If Compare For Call If Call Call Call Call If Compare Call Call Raise Call Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "decompose_with_registry", + "source_code": "def decompose_with_registry(exported_program: torch.export.ExportedProgram, registry: _registration.ONNXRegistry) -> torch.export.ExportedProgram:\n onnx_registered_ops = set(_decomp.get_onnx_implemented_overloads(registry))\n decomp_table = _decomp.create_onnx_friendly_decomposition_table(onnx_registered_ops)\n return exported_program.run_decompositions(decomp_table)", + "docstring": "Decompose the exported program with the given registry. This function is needed so it shows clearly on the profiler results.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_fx_passes.py", + "ast_data": "FunctionDef name:decompose_with_registry arg:exported_program arg:registry arguments arg arg Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_store", + "source_code": "def _store(key_name, array, weights=None, splits=False, rank=False):\n array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits)\n if splits:\n for split_idx in range(n_splits):\n results['split%d_%s' % (split_idx, key_name)] = array[:, split_idx]\n array_means = np.average(array, axis=1, weights=weights)\n results['mean_%s' % key_name] = array_means\n if key_name.startswith(('train_', 'test_')) and np.any(~np.isfinite(array_means)):\n warnings.warn(f'One or more of the {key_name.split('_')[0]} scores are non-finite: {array_means}', category=UserWarning)\n array_stds = np.sqrt(np.average((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights))\n results['std_%s' % key_name] = array_stds\n if rank:\n if np.isnan(array_means).all():\n rank_result = np.ones_like(array_means, dtype=np.int32)\n else:\n min_array_means = np.nanmin(array_means) - 1\n array_means = np.nan_to_num(array_means, nan=min_array_means)\n rank_result = rankdata(-array_means, method='min').astype(np.int32, copy=False)\n results['rank_%s' % key_name] = rank_result", + "docstring": "A small helper to store the scores/times to the cv_results_", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py", + "ast_data": "FunctionDef name:_store arg:key_name arg:array arg:weights arg:splits arg:rank arguments arg arg arg arg arg Assign Call Call If For Call Assign Assign Call Assign If BoolOp Call Call Call Call Call Assign Call Call Assign If If Call Call Assign Call Assign Call Assign Call Assign Call Call Assign" + }, + { + "library": "pandas", + "name": "item", + "source_code": "@final\ndef item(self):\n if len(self) == 1:\n return next(iter(self))\n raise ValueError('can only convert an array of size 1 to a Python scalar')", + "docstring": "Return the first element of the underlying data as a Python scalar. Returns ------- scalar The first element of Series or Index. Raises ------ ValueError If the data is not length = 1. 
See Also -------- Index.values : Returns an array representing the data in the Index. Series.head : Returns the first rows. Examples -------- >>> s = pd.Series([1]) >>> s.item() 1 For an index: >>> s = pd.Series([1], index=[\"a\"]) >>> s.index.item() 'a'", + "type": "method", + "file_path": "pandas\\pandas\\core\\base.py", + "ast_data": "FunctionDef name:item arg:self arguments arg If Compare Call Return return:yes Call Call Raise Call" + }, + { + "library": "tensorflow", + "name": "push_obj", + "source_code": "def push_obj(self, obj: T, offset: int=0):\n traceable_obj = TraceableObject(obj)\n self._stack.append(traceable_obj)\n return traceable_obj.set_filename_and_line_from_caller(offset + 1)", + "docstring": "Add object to the stack and record its filename and line information. Args: obj: An object to store on the stack. offset: Integer. If 0, the caller's stack frame is used. If 1, the caller's caller's stack frame is used. Returns: TraceableObject.SUCCESS if appropriate stack information was found, TraceableObject.HEURISTIC_USED if the stack was smaller than expected, and TraceableObject.FAILURE if the stack was empty.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\traceable_stack.py", + "ast_data": "FunctionDef name:push_obj arg:self arg:obj arg:offset arguments arg arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "allow_inflight_collective_as_graph_input_ctx", + "source_code": "@contextlib.contextmanager\ndef allow_inflight_collective_as_graph_input_ctx(value: bool=True):\n previous = torch._C._distributed_c10d._allow_inflight_collective_as_graph_input()\n try:\n torch._C._distributed_c10d._set_allow_inflight_collective_as_graph_input(value)\n yield\n finally:\n torch._C._distributed_c10d._set_allow_inflight_collective_as_graph_input(previous)", + "docstring": "Context manager to temporarily set whether inflight collectives are allowed as torch.compile graph inputs. 
Common use case is when the collective is issued in eager (with ) but waited in compiled region: With this context manager, when a collective is called, under the hood the work object of the collective will be registered in the work registry, and the wait_tensor() in compiled region called on the output tensor of the collective will wait on the correct work object.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py", + "ast_data": "FunctionDef name:allow_inflight_collective_as_graph_input_ctx arg:value arguments arg Assign Call Try Call Call" + }, + { + "library": "scikit-learn", + "name": "_get_metadata_for_step", + "source_code": "def _get_metadata_for_step(self, *, step_idx, step_params, all_params):\n if self.transform_input is None or not all_params or (not step_params) or (step_idx == 0):\n return step_params\n sub_pipeline = self[:step_idx]\n sub_metadata_routing = get_routing_for_object(sub_pipeline)\n transform_params = {key: value for key, value in all_params.items() if key in sub_metadata_routing.consumes(method='transform', params=all_params.keys())}\n transformed_params = dict()\n transformed_cache = dict()\n for method, method_params in step_params.items():\n transformed_params[method] = Bunch()\n for param_name, param_value in method_params.items():\n if param_name in self.transform_input:\n transformed_params[method][param_name] = _cached_transform(sub_pipeline, cache=transformed_cache, param_name=param_name, param_value=param_value, transform_params=transform_params)\n else:\n transformed_params[method][param_name] = param_value\n return transformed_params", + "docstring": "Get params (metadata) for step . This transforms the metadata up to this step if required, which is indicated by the parameter. If a param in is included in the list, it will be transformed. Parameters ---------- step_idx : int Index of the step in the pipeline. step_params : dict Parameters specific to the step. These are routed parameters, e.g. . If a parameter name here is included in the , then it will be transformed. Note that these parameters are *after* routing, so the aliases are already resolved. all_params : dict All parameters passed by the user. Here this is used to call on the slice of the pipeline itself. Returns ------- dict Parameters to be passed to the step. 
The ones which should be transformed are transformed.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:_get_metadata_for_step arg:self arguments arg arg arg arg If BoolOp Compare Compare Return return:yes Assign Assign Call Assign Call Compare Call Call Assign Call Assign Call For Call Assign Call For Call If Compare Assign Call Assign Return return:yes" + }, + { + "library": "kornia", + "name": "drop_path", + "source_code": "def drop_path(x: Tensor, drop_prob: Optional[float]=0.0, training: bool=False) -> Tensor:\n if drop_prob is None:\n drop_path = 0.0\n if drop_prob == 0.0 or not training:\n return x\n keep_prob = 1.0 - drop_prob\n shape = (x.shape[0],) + (1,) * (x.ndim - 1)\n random_tensor = x.new_empty(shape).bernoulli_(keep_prob)\n if keep_prob > 0.0:\n random_tensor.div_(keep_prob)\n output = x * random_tensor\n return output", + "docstring": "Apply stochastic depth sampling.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\layers\\drop_path.py", + "ast_data": "FunctionDef name:drop_path arg:x arg:drop_prob arg:training arguments arg arg arg If Compare Assign If BoolOp Compare Return return:yes Assign Assign Assign Call Call If Compare Call Assign Return return:yes" + }, + { + "library": "django", + "name": "get_media_prefix", + "source_code": "@register.tag\ndef get_media_prefix(parser, token):\n return PrefixNode.handle_token(parser, token, 'MEDIA_URL')", + "docstring": "Populate a template variable with the media prefix, ``. Usage:: {% get_media_prefix [as varname] %} Examples:: {% get_media_prefix %} {% get_media_prefix as media_prefix %}", + "type": "function", + "file_path": "django\\django\\templatetags\\static.py", + "ast_data": "FunctionDef name:get_media_prefix arg:parser arg:token arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_MockOp", + "source_code": "class _MockOp(object):\n\n def __init__(self, attrs, inputs, outputs, typ, skip_input_indices):\n self.attrs = attrs\n self.inputs = inputs\n self.outputs = outputs\n self.type = typ\n self.skip_input_indices = skip_input_indices\n\n def get_attr(self, attr):\n typ = op_attr_type(self.type, attr)\n for i in range(0, len(self.attrs), 2):\n if self.attrs[i] == attr:\n return make_attr(typ, self.attrs[i + 1])\n raise KeyError(attr)\n\n def _get_control_flow_context(self):\n raise NotImplementedError('tf.GradientTape.gradients() does not support graph control flow operations like tf.cond or tf.while at this time. Use tf.gradients() instead. 
If you need this feature, please file a feature request at https://github.com/tensorflow/tensorflow/issues/new')", + "docstring": "Pretends to be a tf.Operation for the gradient functions.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py", + "ast_data": "ClassDef name:_MockOp FunctionDef name:__init__ arg:self arg:attrs arg:inputs arg:outputs arg:typ arg:skip_input_indices arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:get_attr arg:self arg:attr arguments arg arg Assign Call For Call Call If Compare Return return:yes Call Raise Call FunctionDef name:_get_control_flow_context arg:self arguments arg Raise Call" + }, + { + "library": "pytorch", + "name": "get_group_rank", + "source_code": "def get_group_rank(group: ProcessGroup, global_rank: int) -> int:\n if group is GroupMember.WORLD:\n return global_rank\n if group not in _world.pg_group_ranks:\n raise ValueError(f'Group {group} is not registered, please create group with torch.distributed.new_group API')\n group_ranks = _world.pg_group_ranks[group]\n if global_rank not in group_ranks:\n raise ValueError(f'Global rank {global_rank} is not part of group {group}')\n return group_ranks[global_rank]", + "docstring": "Translate a global rank into a group rank. `` N.B. calling this function on the default process group returns identity", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:get_group_rank arg:group arg:global_rank arguments arg arg If Compare Return return:yes If Compare Raise Call Assign If Compare Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__imul__", + "source_code": "def __imul__(self, y):\n return self.__wrapped__ * y", + "docstring": "Avoid running self.__wrapped__ *= y, which mutates .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py", + "ast_data": "FunctionDef name:__imul__ arg:self arg:y arguments arg arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "coef_", + "source_code": "@property\ndef coef_(self):\n if self.kernel != 'linear':\n raise AttributeError('coef_ is only available when using a linear kernel')\n coef = self._get_coef()\n if sp.issparse(coef):\n coef.data.flags.writeable = False\n else:\n coef.flags.writeable = False\n return coef", + "docstring": "Weights assigned to the features when . Returns ------- ndarray of shape (n_features, n_classes)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\svm\\_base.py", + "ast_data": "FunctionDef name:coef_ arg:self arguments arg If Compare Raise Call Assign Call If Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "graph_op_digests", + "source_code": "def graph_op_digests(self, op_type=None):\n if op_type is not None:\n return [digest for digest in self._graph_op_digests if digest.op_type == op_type]\n else:\n return self._graph_op_digests", + "docstring": "Get the list of the digests for graph-op creation so far. Args: op_type: Optional op type to filter the creation events with. 
Returns: A list of objects.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:graph_op_digests arg:self arg:op_type arguments arg arg If Compare Return return:yes Compare Return return:yes" + }, + { + "library": "pytorch", + "name": "convert_conv_weights_to_channels_last", + "source_code": "def convert_conv_weights_to_channels_last(gm: torch.fx.GraphModule):\n with dynamo_timed('convert_conv_weights_to_channels_last'):\n convs = [n for n in gm.graph.nodes if n.target == aten.convolution.default]\n for conv in convs:\n weight_node = conv.args[1]\n if len(weight_node.meta['val'].size()) != 4 or weight_node.meta['val'].is_contiguous(memory_format=torch.channels_last):\n continue\n with gm.graph.inserting_before(conv):\n new_node = gm.graph.call_function(aten.clone.default, (weight_node,), {'memory_format': torch.channels_last})\n conv.replace_input_with(weight_node, new_node)\n enforce_as_strided_input_layout(gm)\n enforce_output_layout(gm)", + "docstring": "Convert 4d convolution weight tensor to channels last format. This pass is performed before freezing so the added nodes can be constant folded by freezing.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\freezing.py", + "ast_data": "FunctionDef name:convert_conv_weights_to_channels_last arg:gm arguments arg With Call Assign Compare For Assign If BoolOp Compare Call Call Call With Call Assign Call Call Call Call" + }, + { + "library": "pytorch", + "name": "update_bn", + "source_code": "@torch.no_grad()\ndef update_bn(loader: Iterable[Any], model: Module, device: Optional[Union[int, torch.device]]=None):\n momenta = {}\n for module in model.modules():\n if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):\n module.reset_running_stats()\n momenta[module] = module.momentum\n if not momenta:\n return\n was_training = model.training\n model.train()\n for module in momenta.keys():\n module.momentum = None\n for input in loader:\n if isinstance(input, (list, tuple)):\n input = input[0]\n if device is not None:\n input = input.to(device)\n model(input)\n for bn_module in momenta.keys():\n bn_module.momentum = momenta[bn_module]\n model.train(was_training)", + "docstring": "Update BatchNorm running_mean, running_var buffers in the model. It performs one pass over data in to estimate the activation statistics for BatchNorm layers in the model. Args: loader (torch.utils.data.DataLoader): dataset loader to compute the activation statistics on. Each data batch should be either a tensor, or a list/tuple whose first element is a tensor containing data. model (torch.nn.Module): model for which we seek to update BatchNorm statistics. device (torch.device, optional): If set, data will be transferred to :attr: before being passed into :attr:. Example: >>> # xdoctest: +SKIP(\"Undefined variables\") >>> loader, model = ... >>> torch.optim.swa_utils.update_bn(loader, model) .. 
note:: The utility assumes that each data batch in :attr: is either a tensor or a list or tuple of tensors; in the latter case it is assumed that :meth: should be called on the first element of the list or tuple corresponding to the data batch.", + "type": "function", + "file_path": "pytorch\\torch\\optim\\swa_utils.py", + "ast_data": "FunctionDef name:update_bn arg:loader arg:model arg:device arguments arg arg arg Assign For Call If Call Call Assign If Return return:no Assign Call For Call Assign For If Call Assign If Compare Assign Call Call For Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "list_pointwise_strategy", + "source_code": "def list_pointwise_strategy(op_schema: OpSchema, linearity: bool=False) -> StrategyType:\n\n def args_tuple_strategies(args_schema: tuple[object, ...]) -> list[TupleStrategy]:\n first_arg = args_schema[0]\n assert isinstance(first_arg, TupleStrategy)\n strategy_len = len(first_arg.childs)\n tuple_strategies: list[TupleStrategy] = []\n for arg_idx, arg in enumerate(args_schema):\n if isinstance(arg, TupleStrategy):\n assert len(arg.childs) == strategy_len\n tuple_strategies.append(arg)\n elif isinstance(arg, OpStrategy):\n if arg_idx > 0:\n tuple_strategies.append(TupleStrategy([arg for _ in range(strategy_len)]))\n else:\n raise RuntimeError(f'list op only supports tuple strategy! {op_schema}')\n return tuple_strategies\n args_strategies = args_tuple_strategies(op_schema.args_schema)\n follow_strategy: TupleStrategy = args_strategies[0]\n list_strategy: list[OpStrategy] = []\n for child_idx, child_strtgy in enumerate(follow_strategy.childs):\n assert isinstance(child_strtgy, OpStrategy)\n args_schema: list[OpStrategy] = [cast(OpStrategy, arg_strategy.childs[child_idx]) for arg_strategy in args_strategies]\n pointwise_strategy: OpStrategy = common_pointwise_strategy(args_schema, child_strtgy, linearity)\n list_strategy.append(pointwise_strategy)\n return TupleStrategy(list_strategy)", + "docstring": "Apply the pointwise strategy to the zipped arguments. For example, if we run a foreach add of two lists l1 and l2, then we apply the pointwise strategy on each pair (l1[i], l2[i]). If the first argument is a list but the second (or later) one is a tensor, then we broadcast the tensor by replicating it into a list with the length of the first argument. 
Args: mesh (DeviceMesh): device mesh for pointwise ops op_schema (OpSchema): schema of the operator to generate strategy for linearity (bool): specify whether op(a) + op(b) = op(a + b) Returns: OpStrategy: generated strategy", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_pointwise_ops.py", + "ast_data": "FunctionDef name:list_pointwise_strategy arg:op_schema arg:linearity arguments arg arg FunctionDef name:args_tuple_strategies arg:args_schema arguments arg Assign Call Assign Call For Call If Call Compare Call Call If Call If Compare Call Call Call Raise Call Return return:yes Assign Call For Call Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_MakeTensor", + "source_code": "def _MakeTensor(v, arg_name):\n if isinstance(v, tensor_pb2.TensorProto):\n return v\n raise TypeError(f\"Don't know how to convert {repr(v)} to a TensorProto for argument '{arg_name}'\")", + "docstring": "Ensure v is a TensorProto.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_library.py", + "ast_data": "FunctionDef name:_MakeTensor arg:v arg:arg_name arguments arg arg If Call Return return:yes Raise Call Call" + }, + { + "library": "kornia", + "name": "Q", + "source_code": "@property\ndef Q(self) -> Tensor:\n return self._Q_matrix", + "docstring": "The Q matrix of the horizontal stereo setup. This matrix is used for reprojecting a disparity tensor to the corresponding point cloud. Note that this is in a general form that allows different focal lengths in the x and y direction. Return: The Q matrix of shape :math:.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py", + "ast_data": "FunctionDef name:Q arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_emit_with_loc", + "source_code": "def _emit_with_loc(self, op_str, node=None):\n loc = ''\n if node:\n loc = self._create_mlir_loc(anno.getanno(node, anno.Basic.ORIGIN, default=None))\n self.emit(op_str + ' ' + loc)", + "docstring": "Emit the mlir operation with the location associated with the node. Args: op_str: The mlir operation string to be emitted. node: The node of the AST tree, the mlir operation translated from.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py", + "ast_data": "FunctionDef name:_emit_with_loc arg:self arg:op_str arg:node arguments arg arg arg Assign If Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "get_load_options", + "source_code": "def get_load_options():\n return _load_context.load_options()", + "docstring": "Returns the load options under a load context.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load_context.py", + "ast_data": "FunctionDef name:get_load_options arguments Return return:yes Call" + }, + { + "library": "scrapy", + "name": "get_addon", + "source_code": "def get_addon(self, cls: type[_T]) -> _T | None:\n return self._get_component(cls, self.addons.addons)", + "docstring": "Return the run-time instance of an :ref: of the specified class or a subclass, or `` if none is found. .. 
versionadded:: 2.12", + "type": "method", + "file_path": "scrapy\\scrapy\\crawler.py", + "ast_data": "FunctionDef name:get_addon arg:self arg:cls arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, file_pattern, batch_size=1, buffer_size=1, parallelism=1, shift_ratio=0, seed=0, name=None, batches=None, compression_type=None):\n self._batch_size = batch_size\n if batches is not None:\n self._batch_size *= batches\n self._batches = batches\n self._file_pattern = file_pattern\n self._buffer_size = buffer_size\n self._parallelism = parallelism\n self._shift_ratio = shift_ratio\n self._seed = seed\n self._name = name\n self._compression_type = python_io.TFRecordCompressionType.NONE\n if compression_type is not None:\n self._compression_type = compression_type", + "docstring": "Constructs a RecordInput Op. Args: file_pattern: File path to the dataset, possibly containing wildcards. All matching files will be iterated over each epoch. batch_size: How many records to return at a time. buffer_size: The maximum number of records the buffer will contain. parallelism: How many reader threads to use for reading from files. shift_ratio: What percentage of the total number files to move the start file forward by each epoch. seed: Specify the random number seed used by generator that randomizes records. name: Optional name for the operation. batches: None by default, creating a single batch op. Otherwise specifies how many batches to create, which are returned as a list when is called. An example use case is to split processing between devices on one computer. compression_type: The type of compression for the file. Currently ZLIB and GZIP are supported. Defaults to none. Raises: ValueError: If one of the arguments is invalid.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:file_pattern arg:batch_size arg:buffer_size arg:parallelism arg:shift_ratio arg:seed arg:name arg:batches arg:compression_type arguments arg arg arg arg arg arg arg arg arg arg Assign If Compare Assign Assign Assign Assign Assign Assign Assign Assign If Compare Assign" + }, + { + "library": "pytorch", + "name": "define_kernel", + "source_code": "def define_kernel(self, src_code, node_schedule, kernel):\n wrapper = V.graph.wrapper_code\n if src_code in wrapper.src_to_kernel:\n kernel_name = wrapper.src_to_kernel[src_code]\n else:\n kernel_name = f'halide_kernel_{wrapper.next_kernel_suffix()}'\n wrapper.src_to_kernel[src_code] = kernel_name\n wrapper.add_import_once('from torch._inductor.runtime.hints import HalideMeta, HalideInputSpec')\n compile_wrapper = IndentedBuffer()\n compile_wrapper.writeline(f\"async_compile.halide({kernel.halide_kernel_meta()!r}, '''\")\n compile_wrapper.splice(src_code, strip=True)\n compile_wrapper.writeline(\"''')\")\n origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper)\n metadata_comment = f'{origins}\\n{detailed_origins}'\n wrapper.define_kernel(kernel_name, compile_wrapper.getvalue(), metadata_comment)\n if is_metric_table_enabled('kernel_metadata'):\n log_kernel_metadata(kernel_name, '', src_code)\n return kernel_name", + "docstring": "Codegen kernel definition to go in output wrapper code", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\halide.py", + "ast_data": "FunctionDef name:define_kernel arg:self arg:src_code arg:node_schedule arg:kernel arguments arg arg arg arg Assign 
If Compare Assign Assign Call Assign Call Assign Call Call Call Call Call Assign Call Assign Call Call If Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "inserting_before", + "source_code": "@compatibility(is_backward_compatible=True)\ndef inserting_before(self, n: Optional[Node]=None):\n if n is None:\n return self.inserting_after(self._root)\n assert n.graph == self, 'Node to insert before is not in graph.'\n return _InsertPoint(self, n.prepend)", + "docstring": "Set the point at which create_node and companion methods will insert into the graph. When used within a 'with' statement, this will temporary set the insert point and then restore it when the with statement exits:: with g.inserting_before(n): ... # inserting before node n ... # insert point restored to what it was previously g.inserting_before(n) # set the insert point permanently Args: n (Optional[Node]): The node before which to insert. If None this will insert before the beginning of the entire graph. Returns: A resource manager that will restore the insert point on ``.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\graph.py", + "ast_data": "FunctionDef name:inserting_before arg:self arg:n arguments arg arg If Compare Return return:yes Call Compare Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "get_distribution", + "source_code": "def get_distribution(self):\n from numpy.distutils.core import get_distribution\n return get_distribution()", + "docstring": "Return the distutils distribution object for self.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\misc_util.py", + "ast_data": "FunctionDef name:get_distribution arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "HalfTweedieLossIdentity", + "source_code": "class HalfTweedieLossIdentity(BaseLoss):\n\n def __init__(self, sample_weight=None, power=1.5):\n super().__init__(closs=CyHalfTweedieLossIdentity(power=float(power)), link=IdentityLink())\n if self.closs.power <= 0:\n self.interval_y_true = Interval(-np.inf, np.inf, False, False)\n elif self.closs.power < 2:\n self.interval_y_true = Interval(0, np.inf, True, False)\n else:\n self.interval_y_true = Interval(0, np.inf, False, False)\n if self.closs.power == 0:\n self.interval_y_pred = Interval(-np.inf, np.inf, False, False)\n else:\n self.interval_y_pred = Interval(0, np.inf, False, False)", + "docstring": "Half Tweedie deviance loss with identity link, for regression. Domain: y_true in real numbers for power <= 0 y_true in non-negative real numbers for 0 < power < 2 y_true in positive real numbers for 2 <= power y_pred in positive real numbers for power != 0 y_pred in real numbers for power = 0 power in real numbers Link: y_pred = raw_prediction For a given sample x_i, half Tweedie deviance loss with p=power is defined as:: loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p) - y_true_i * raw_prediction_i**(1-p) / (1-p) + raw_prediction_i**(2-p) / (2-p) Note that the minimum value of this loss is 0. 
Note furthermore that although no Tweedie distribution exists for 0 < power < 1, it still gives a strictly consistent scoring function for the expectation.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", + "ast_data": "ClassDef name:HalfTweedieLossIdentity FunctionDef name:__init__ arg:self arg:sample_weight arg:power arguments arg arg arg Call Call Call Call Call If Compare Assign Call If Compare Assign Call Assign Call If Compare Assign Call Assign Call" + }, + { + "library": "scipy", + "name": "initial_vector", + "source_code": "def initial_vector(self):\n return asarray([np.random.uniform(l, u) for l, u in self.bounds])", + "docstring": "Random initialisation for the benchmark problem. Returns ------- x : sequence a vector of length `` that contains random floating point numbers that lie between the lower and upper bounds for a given parameter.", + "type": "method", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_benchmark.py", + "ast_data": "FunctionDef name:initial_vector arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_type_spec", + "source_code": "@abc.abstractproperty\ndef _type_spec(self):\n raise NotImplementedError(f'{type(self).__name__}._type_spec()')", + "docstring": "A describing the type of this value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor.py", + "ast_data": "FunctionDef name:_type_spec arg:self arguments arg Raise Call Call" + }, + { + "library": "pytorch", + "name": "export_to_regex", + "source_code": "def export_to_regex(self):\n return self._pattern(self.root, self._digest)", + "docstring": "Export the Trie to a regex pattern.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py", + "ast_data": "FunctionDef name:export_to_regex arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "random_split", + "source_code": "def random_split(dataset: Dataset[_T], lengths: Sequence[Union[int, float]], generator: Optional[Generator]=default_generator) -> list[Subset[_T]]:\n if math.isclose(sum(lengths), 1) and sum(lengths) <= 1:\n subset_lengths: list[int] = []\n for i, frac in enumerate(lengths):\n if frac < 0 or frac > 1:\n raise ValueError(f'Fraction at index {i} is not between 0 and 1')\n n_items_in_split = int(math.floor(len(dataset) * frac))\n subset_lengths.append(n_items_in_split)\n remainder = len(dataset) - sum(subset_lengths)\n for i in range(remainder):\n idx_to_add_at = i % len(subset_lengths)\n subset_lengths[idx_to_add_at] += 1\n lengths = subset_lengths\n for i, length in enumerate(lengths):\n if length == 0:\n warnings.warn(f'Length of split at index {i} is 0. This might result in an empty dataset.')\n if sum(lengths) != len(dataset):\n raise ValueError('Sum of input lengths does not equal the length of the input dataset!')\n indices = randperm(sum(lengths), generator=generator).tolist()\n lengths = cast(Sequence[int], lengths)\n return [Subset(dataset, indices[offset - length:offset]) for offset, length in zip(itertools.accumulate(lengths), lengths)]", + "docstring": "Randomly split a dataset into non-overlapping new datasets of given lengths. If a list of fractions that sum up to 1 is given, the lengths will be computed automatically as floor(frac * len(dataset)) for each fraction provided. 
After computing the lengths, if there are any remainders, 1 count will be distributed in round-robin fashion to the lengths until there are no remainders left. Optionally fix the generator for reproducible results, e.g.: Example: >>> # xdoctest: +SKIP >>> generator1 = torch.Generator().manual_seed(42) >>> generator2 = torch.Generator().manual_seed(42) >>> random_split(range(10), [3, 7], generator=generator1) >>> random_split(range(30), [0.3, 0.3, 0.4], generator=generator2) Args: dataset (Dataset): Dataset to be split lengths (sequence): lengths or fractions of splits to be produced generator (Generator): Generator used for the random permutation.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\data\\dataset.py", + "ast_data": "FunctionDef name:random_split arg:dataset arg:lengths arg:generator arguments arg arg arg If BoolOp Call Call Compare Call For Call If BoolOp Compare Compare Raise Call Assign Call Call Call Call Assign Call Call For Call Assign Call Assign For Call If Compare Call If Compare Call Call Raise Call Assign Call Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_get_spec_for", + "source_code": "def _get_spec_for(value_or_spec):\n if isinstance(value_or_spec, type_spec.TypeSpec):\n return value_or_spec\n return type_spec.type_spec_from_value(value_or_spec)", + "docstring": "Returns TypeSpec of a value or itself if it is a TypeSpec already.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py", + "ast_data": "FunctionDef name:_get_spec_for arg:value_or_spec arguments arg If Call Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "nodata_value", + "source_code": "@property\ndef nodata_value(self):\n nodata_exists = c_int()\n value = capi.get_band_nodata_value(self._ptr, nodata_exists)\n if not nodata_exists:\n value = None\n elif self.datatype() in GDAL_INTEGER_TYPES:\n value = int(value)\n return value", + "docstring": "Return the nodata value for this band, or None if it isn't set.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py", + "ast_data": "FunctionDef name:nodata_value arg:self arguments arg Assign Call Assign Call If Assign If Compare Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "DemultiplexerIterDataPipe", + "source_code": "@functional_datapipe('demux')\nclass DemultiplexerIterDataPipe(IterDataPipe):\n\n def __new__(cls, datapipe: IterDataPipe, num_instances: int, classifier_fn: Callable[[_T_co], Optional[int]], drop_none: bool=False, buffer_size: int=1000):\n if num_instances < 1:\n raise ValueError(f'Expected `num_instances` larger than 0, but {num_instances} is found')\n _check_unpickable_fn(classifier_fn)\n container = _DemultiplexerIterDataPipe(datapipe, num_instances, classifier_fn, drop_none, buffer_size)\n return [_ChildDataPipe(container, i) for i in range(num_instances)]", + "docstring": "Splits the input DataPipe into multiple child DataPipes, using the given classification function (functional name: `Noneclassifier_fn` >>> def odd_or_even_no_zero(n): ... 
return n % 2 if n != 0 else None >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even_no_zero, drop_none=True) >>> list(dp1) [2, 4] >>> list(dp2) [1, 3]", + "type": "class", + "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\combining.py", + "ast_data": "ClassDef name:DemultiplexerIterDataPipe FunctionDef name:__new__ arg:cls arg:datapipe arg:num_instances arg:classifier_fn arg:drop_none arg:buffer_size arguments arg arg arg arg arg arg If Compare Raise Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "_generate_bagging_indices", + "source_code": "def _generate_bagging_indices(random_state, bootstrap_features, bootstrap_samples, n_features, n_samples, max_features, max_samples):\n random_state = check_random_state(random_state)\n feature_indices = _generate_indices(random_state, bootstrap_features, n_features, max_features)\n sample_indices = _generate_indices(random_state, bootstrap_samples, n_samples, max_samples)\n return (feature_indices, sample_indices)", + "docstring": "Randomly draw feature and sample indices.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py", + "ast_data": "FunctionDef name:_generate_bagging_indices arg:random_state arg:bootstrap_features arg:bootstrap_samples arg:n_features arg:n_samples arg:max_features arg:max_samples arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_lloyd_iteration", + "source_code": "def _lloyd_iteration(sample: np.ndarray, decay: float, qhull_options: str) -> np.ndarray:\n new_sample = np.empty_like(sample)\n voronoi = Voronoi(sample, qhull_options=qhull_options)\n for ii, idx in enumerate(voronoi.point_region):\n region = [i for i in voronoi.regions[idx] if i != -1]\n verts = voronoi.vertices[region]\n centroid = np.mean(verts, axis=0)\n new_sample[ii] = sample[ii] + (centroid - sample[ii]) * decay\n is_valid = np.all(np.logical_and(new_sample >= 0, new_sample <= 1), axis=1)\n sample[is_valid] = new_sample[is_valid]\n return sample", + "docstring": "Lloyd-Max algorithm iteration. Based on the implementation of Stéfan van der Walt: which is: Copyright (c) 2021-04-21 Stéfan van der Walt MIT License Parameters ---------- sample : array_like (n, d) The sample to iterate on. decay : float Relaxation decay. A positive value would move the samples toward their centroid, and negative value would move them away. 1 would move the samples to their centroid. qhull_options : str Additional options to pass to Qhull. See Qhull manual for details. (Default: \"Qbb Qc Qz Qj Qx\" for ndim > 4 and \"Qbb Qc Qz Qj\" otherwise.) 
Returns ------- sample : array_like (n, d) The sample after an iteration of Lloyd's algorithm.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_qmc.py", + "ast_data": "FunctionDef name:_lloyd_iteration arg:sample arg:decay arg:qhull_options arguments arg arg arg Assign Call Assign Call For Call Assign Compare Assign Assign Call Assign Assign Call Call Compare Compare Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "NeverExpires", + "source_code": "class NeverExpires(object):\n\n def expired(self):\n return False", + "docstring": "A representation of a never expiring object.", + "type": "class", + "file_path": "cherrypy\\cherrypy\\lib\\locking.py", + "ast_data": "ClassDef name:NeverExpires FunctionDef name:expired arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "ElasticNetBenchmark", + "source_code": "class ElasticNetBenchmark(Predictor, Estimator, Benchmark):\n param_names = ['representation', 'precompute']\n params = (['dense', 'sparse'], [True, False])\n\n def setup_cache(self):\n super().setup_cache()\n\n def make_data(self, params):\n representation, precompute = params\n if representation == 'dense':\n data = _synth_regression_dataset(n_samples=1000000, n_features=100)\n else:\n data = _synth_regression_sparse_dataset(n_samples=50000, n_features=5000, density=0.01)\n return data\n\n def make_estimator(self, params):\n representation, precompute = params\n estimator = ElasticNet(precompute=precompute, alpha=0.001, random_state=0)\n return estimator\n\n def make_scorers(self):\n make_gen_reg_scorers(self)\n\n def skip(self, params):\n representation, precompute = params\n if representation == 'sparse' and precompute is False:\n return True\n return False", + "docstring": "Benchmarks for ElasticNet.", + "type": "class", + "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\linear_model.py", + "ast_data": "ClassDef name:ElasticNetBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign If Compare Assign Call Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call FunctionDef name:skip arg:self arg:params arguments arg arg Assign If BoolOp Compare Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "moving_average_variables", + "source_code": "@tf_export(v1=['moving_average_variables'])\ndef moving_average_variables(scope=None):\n return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, scope)", + "docstring": "Returns all variables that maintain their moving averages. If an object is created and the method is called on a list of variables, these variables will be added to the collection. This convenience function returns the contents of that collection. Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose attribute matches using . Items without a attribute are never returned if a scope is supplied. The choice of means that a without special tokens filters by prefix. 
Returns: A list of Variable objects.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:moving_average_variables arg:scope arguments arg Return return:yes Call Call" + }, + { + "library": "virtualenv", + "name": "locked", + "source_code": "@contextmanager\ndef locked(self, path):\n yield", + "docstring": "Do nothing.", + "type": "method", + "file_path": "virtualenv\\src\\virtualenv\\app_data\\na.py", + "ast_data": "FunctionDef name:locked arg:self arg:path arguments arg arg" + }, + { + "library": "django", + "name": "equals_lf", + "source_code": "def equals_lf(line):\n return line == ('\\n' if isinstance(line, str) else b'\\n')", + "docstring": "Return True if line (a text or bytestring) equals ' '.", + "type": "function", + "file_path": "django\\django\\core\\files\\base.py", + "ast_data": "FunctionDef name:equals_lf arg:line arguments arg Return return:yes Compare Call" + }, + { + "library": "scikit-learn", + "name": "_compute_partial_dependence_recursion", + "source_code": "def _compute_partial_dependence_recursion(self, grid, target_features):\n grid = np.asarray(grid, dtype=DTYPE, order='C')\n averaged_predictions = np.zeros(shape=grid.shape[0], dtype=np.float64, order='C')\n target_features = np.asarray(target_features, dtype=np.intp, order='C')\n self.tree_.compute_partial_dependence(grid, target_features, averaged_predictions)\n return averaged_predictions", + "docstring": "Fast partial dependence computation. Parameters ---------- grid : ndarray of shape (n_samples, n_target_features), dtype=np.float32 The grid points on which the partial dependence should be evaluated. target_features : ndarray of shape (n_target_features), dtype=np.intp The set of target features for which the partial dependence should be evaluated. 
Returns ------- averaged_predictions : ndarray of shape (n_samples,), dtype=np.float64 The value of the partial dependence function on each grid point.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\tree\\_classes.py", + "ast_data": "FunctionDef name:_compute_partial_dependence_recursion arg:self arg:grid arg:target_features arguments arg arg arg Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_distribute_function", + "source_code": "def _distribute_function(fn: Callable, fn_module: types.ModuleType, device_mesh: DeviceMesh, input_fn: Optional[Callable]=None, output_fn: Optional[Callable]=None) -> None:\n\n def wrapper(target_fn: Callable, input_fn: Optional[Callable], output_fn: Optional[Callable]) -> Callable:\n\n def inner_fn(*args: tuple[Any, ...], **kwargs: dict[str, Any]) -> Any:\n if input_fn is not None:\n args, kwargs = input_fn(device_mesh, *args, **kwargs)\n output = target_fn(*args, **kwargs)\n if output_fn is not None:\n output = output_fn(device_mesh, output)\n return output\n return inner_fn\n global _replaced_functions\n if fn in _replaced_functions:\n return\n wrapper_fn = wrapper(fn, input_fn, output_fn)\n setattr(fn_module, fn.__name__, wrapper_fn)\n _replaced_functions[wrapper_fn] = (fn.__name__, fn)", + "docstring": "`DeviceMesh`.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py", + "ast_data": "FunctionDef name:_distribute_function arg:fn arg:fn_module arg:device_mesh arg:input_fn arg:output_fn arguments arg arg arg arg arg FunctionDef name:wrapper arg:target_fn arg:input_fn arg:output_fn arguments arg arg arg FunctionDef name:inner_fn arguments arg arg If Compare Assign Call Assign Call If Compare Assign Call Return return:yes Return return:yes If Compare Return return:no Assign Call Call Assign" + }, + { + "library": "scipy", + "name": "from_number", + "source_code": "@classmethod\ndef from_number(cls, n, min=None):\n finfo = np.finfo(n.dtype)\n n_prec = finfo.precision + 1\n n_exp = number_digits(np.max(np.abs([finfo.maxexp, finfo.minexp])))\n width = 1 + 1 + n_prec + 1 + n_exp + 1\n if n < 0:\n width += 1\n repeat = int(np.floor(80 / width))\n return cls(width, n_prec, min, repeat=repeat)", + "docstring": "Given a float number, returns a \"reasonable\" ExpFormat instance to represent any number between -n and n. 
Parameters ---------- n : float max number one wants to be able to represent min : int minimum number of characters to use for the format Returns ------- res : ExpFormat ExpFormat instance with reasonable (see Notes) computed width Notes ----- Reasonable should be understood as the minimal string length necessary to avoid losing precision.", + "type": "method", + "file_path": "scipy\\scipy\\io\\_harwell_boeing\\_fortran_format_parser.py", + "ast_data": "FunctionDef name:from_number arg:cls arg:n arg:min arguments arg arg arg Assign Call Assign Assign Call Call Call Assign If Compare Assign Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "_idct_8x8", + "source_code": "def _idct_8x8(input: Tensor) -> Tensor:\n dtype: Dtype = input.dtype\n device: Device = input.device\n alpha: Tensor = torch.ones(8, dtype=dtype, device=device)\n alpha[0] = 1.0 / 2 ** 0.5\n dct_scale: Tensor = torch.outer(alpha, alpha)\n input = input * dct_scale[None, None]\n index: Tensor = torch.arange(8, dtype=dtype, device=device)\n x, y, u, v = torch.meshgrid(index, index, index, index)\n idct_tensor: Tensor = ((2.0 * u + 1.0) * x * pi / 16.0).cos() * ((2.0 * v + 1.0) * y * pi / 16.0).cos()\n output: Tensor = 0.25 * torch.tensordot(input, idct_tensor, dims=2) + 128.0\n return output", + "docstring": "Perform an 8 x 8 discrete cosine transform. Args: input (Tensor): Patched input tensor of the shape :math:. Returns: output (Tensor): DCT output tensor of the shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\enhance\\jpeg.py", + "ast_data": "FunctionDef name:_idct_8x8 arg:input arguments arg Call Assign Call Assign Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "check_gcc_function_attribute", + "source_code": "def check_gcc_function_attribute(cmd, attribute, name):\n cmd._check_compiler()\n body = textwrap.dedent('\\n #pragma GCC diagnostic error \"-Wattributes\"\\n #pragma clang diagnostic error \"-Wattributes\"\\n\\n int %s %s(void* unused)\\n {\\n return 0;\\n }\\n\\n int\\n main()\\n {\\n return 0;\\n }\\n ') % (attribute, name)\n return cmd.try_compile(body, None, None) != 0", + "docstring": "Return True if the given function attribute is supported.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\command\\autodist.py", + "ast_data": "FunctionDef name:check_gcc_function_attribute arg:cmd arg:attribute arg:name arguments arg arg arg Call Assign Call Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "_save_tf_record_dataset", + "source_code": "def _save_tf_record_dataset(self, repr_ds: RepresentativeDataset, signature_def_key: str) -> _RepresentativeDatasetFile:\n if not context.executing_eagerly():\n with session.Session() as sess:\n repr_ds = replace_tensors_by_numpy_ndarrays(repr_ds, sess)\n expected_input_keys = self.expected_input_key_map.get(signature_def_key, None)\n tfrecord_file_path = self.path_map[signature_def_key]\n with python_io.TFRecordWriter(tfrecord_file_path) as writer:\n for repr_sample in repr_ds:\n if expected_input_keys is not None and set(repr_sample.keys()) != expected_input_keys:\n raise KeyError(f'Invalid input keys for representative sample. The function expects input keys of: {set(expected_input_keys)}. Got: {set(repr_sample.keys())}. 
Please provide correct input keys for representative samples.')\n sample = _RepresentativeDataSample()\n for input_name, input_value in repr_sample.items():\n sample.tensor_proto_inputs[input_name].CopyFrom(tensor_util.make_tensor_proto(input_value))\n writer.write(sample.SerializeToString())\n logging.info('Saved representative dataset for signature def: %s to: %s', signature_def_key, tfrecord_file_path)\n return _RepresentativeDatasetFile(tfrecord_file_path=str(tfrecord_file_path))", + "docstring": "Saves to a TFRecord file. Each sample in is serialized as . Args: repr_ds: to save. signature_def_key: The signature def key associated with . Returns: a RepresentativeDatasetFile instance contains the path to the saved file. Raises: KeyError: If the set of input keys in the dataset samples doesn't match the set of expected input keys.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py", + "ast_data": "FunctionDef name:_save_tf_record_dataset arg:self arg:repr_ds arg:signature_def_key arguments arg arg arg If Call With Call Assign Call Assign Call Assign With Call For If BoolOp Compare Compare Call Call Raise Call Call Call Call Assign Call For Call Call Call Call Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_slot_names", + "source_code": "def get_slot_names(self, *args, **kwargs):\n return self._opt.get_slot_names(*args, **kwargs)", + "docstring": "Return a list of the names of slots created by the . This simply wraps the get_slot_names() from the actual optimizer. Args: *args: Arguments for get_slot(). **kwargs: Keyword arguments for get_slot(). Returns: A list of strings.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_optimizer.py", + "ast_data": "FunctionDef name:get_slot_names arg:self arguments arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_patchA", + "source_code": "def set_patchA(self, patchA):\n self.patchA = patchA\n self.stale = True", + "docstring": "Set the tail patch. Parameters ---------- patchA :", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:set_patchA arg:self arg:patchA arguments arg arg Assign Assign" + }, + { + "library": "matplotlib", + "name": "resampled", + "source_code": "def resampled(self, lutsize):\n if hasattr(self, '_resample'):\n _api.warn_external(f'The ability to resample a color map is now public API However the class {type(self)} still only implements the previous private _resample method. 
Please update your class.')\n return self._resample(lutsize)\n raise NotImplementedError()", + "docstring": "Return a new colormap with *lutsize* entries.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:resampled arg:self arg:lutsize arguments arg arg If Call Call Call Return return:yes Call Raise Call" + }, + { + "library": "django", + "name": "_column_generated_sql", + "source_code": "def _column_generated_sql(self, field):\n expression_sql, params = field.generated_sql(self.connection)\n persistency_sql = 'STORED' if field.db_persist else 'VIRTUAL'\n if self.connection.features.requires_literal_defaults:\n expression_sql = expression_sql % tuple((self.quote_value(p) for p in params))\n params = ()\n return (f'GENERATED ALWAYS AS ({expression_sql}) {persistency_sql}', params)", + "docstring": "Return the SQL to use in a GENERATED ALWAYS clause.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\schema.py", + "ast_data": "FunctionDef name:_column_generated_sql arg:self arg:field arguments arg arg Assign Call Assign If Assign Call Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "CubicHermiteSpline", + "source_code": "class CubicHermiteSpline(PPoly):\n\n def __init__(self, x, y, dydx, axis=0, extrapolate=None):\n if extrapolate is None:\n extrapolate = True\n x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)\n dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))\n slope = np.diff(y, axis=0) / dxr\n t = (dydx[:-1] + dydx[1:] - 2 * slope) / dxr\n c = np.empty((4, len(x) - 1) + y.shape[1:], dtype=t.dtype)\n c[0] = t / dxr\n c[1] = (slope - dydx[:-1]) / dxr - t\n c[2] = dydx[:-1]\n c[3] = y[:-1]\n super().__init__(c, x, extrapolate=extrapolate)\n self.axis = axis", + "docstring": "Piecewise cubic interpolator to fit values and first derivatives (C1 smooth). The result is represented as a instance. Parameters ---------- x : array_like, shape (n,) 1-D array containing values of the independent variable. Values must be real, finite and in strictly increasing order. y : array_like Array containing values of the dependent variable. 
It can have arbitrary number of dimensions, but the length along `yyyBPoly.from_derivativesCubic Hermite spline `_ on Wikipedia.", + "type": "class", + "file_path": "scipy\\scipy\\interpolate\\_cubic.py", + "ast_data": "ClassDef name:CubicHermiteSpline FunctionDef name:__init__ arg:self arg:x arg:y arg:dydx arg:axis arg:extrapolate arguments arg arg arg arg arg arg If Compare Assign Assign Call Assign Call Assign Call Assign Assign Call Call Assign Assign Assign Assign Call Call Assign" + }, + { + "library": "tensorflow", + "name": "values", + "source_code": "@property\ndef values(self):\n return self._values", + "docstring": "Returns the per replica values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "FunctionDef name:values arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_document_frequency", + "source_code": "def _document_frequency(X):\n if sp.issparse(X) and X.format == 'csr':\n return np.bincount(X.indices, minlength=X.shape[1])\n else:\n return np.diff(X.indptr)", + "docstring": "Count the number of non-zero values for each feature in sparse X.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py", + "ast_data": "FunctionDef name:_document_frequency arg:X arguments arg If BoolOp Call Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "Value", + "source_code": "class Value(Tensor):\n\n def numpy(self):\n pass", + "docstring": "Tensor that can be associated with a value (aka \"eager tensor\"). These objects represent the (usually future) output of executing an op immediately.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\types\\core.py", + "ast_data": "ClassDef name:Value FunctionDef name:numpy arg:self arguments arg" + }, + { + "library": "scipy", + "name": "_read_body_array", + "source_code": "def _read_body_array(cursor):\n from . import _fmm_core\n vals = np.zeros(cursor.header.shape, dtype=_field_to_dtype.get(cursor.header.field))\n _fmm_core.read_body_array(cursor, vals)\n return vals", + "docstring": "Read MatrixMarket array body", + "type": "function", + "file_path": "scipy\\scipy\\io\\_fast_matrix_market\\__init__.py", + "ast_data": "FunctionDef name:_read_body_array arg:cursor arguments arg Assign Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "get_rename_function", + "source_code": "def get_rename_function(mapper):\n\n def f(x):\n if x in mapper:\n return mapper[x]\n else:\n return x\n return f if isinstance(mapper, (abc.Mapping, ABCSeries)) else mapper", + "docstring": "Returns a function that will map names/labels, dependent if mapper is a dict, Series or just a function.", + "type": "function", + "file_path": "pandas\\pandas\\core\\common.py", + "ast_data": "FunctionDef name:get_rename_function arg:mapper arguments arg FunctionDef name:f arg:x arguments arg If Compare Return return:yes Return return:yes Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "alpha_max", + "source_code": "def alpha_max(emp_cov):\n A = np.copy(emp_cov)\n A.flat[::A.shape[0] + 1] = 0\n return np.max(np.abs(A))", + "docstring": "Find the maximum alpha for which there are some non-zeros off-diagonal. Parameters ---------- emp_cov : ndarray of shape (n_features, n_features) The sample covariance matrix. 
Notes ----- This results from the bound for the all the Lasso that are solved in GraphicalLasso: each time, the row of cov corresponds to Xy. As the bound for alpha is given by , the result follows.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\covariance\\_graph_lasso.py", + "ast_data": "FunctionDef name:alpha_max arg:emp_cov arguments arg Assign Call Assign Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "__mul__", + "source_code": "def __mul__(self, other):\n return self.__class__(self.scalar * other.scalar - np.dot(self.vector, other.vector), self.scalar * other.vector + self.vector * other.scalar + np.cross(self.vector, other.vector))", + "docstring": "Product of two quaternions i*i = j*j = k*k = i*j*k = -1 Quaternion multiplication can be expressed concisely using scalar and vector parts, see", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:__mul__ arg:self arg:other arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "cherrypy", + "name": "subscribe", + "source_code": "def subscribe(self, channel, callback=None, priority=None):\n if callback is None:\n return functools.partial(self.subscribe, channel, priority=priority)\n ch_listeners = self.listeners.setdefault(channel, set())\n ch_listeners.add(callback)\n if priority is None:\n priority = getattr(callback, 'priority', 50)\n self._priorities[channel, callback] = priority", + "docstring": "Add the given callback at the given channel (if not present). If callback is None, return a partial suitable for decorating the callback.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\wspbus.py", + "ast_data": "FunctionDef name:subscribe arg:self arg:channel arg:callback arg:priority arguments arg arg arg arg If Compare Return return:yes Call Assign Call Call Call If Compare Assign Call Assign" + }, + { + "library": "scikit-learn", + "name": "is_pandas_na", + "source_code": "def is_pandas_na(x):\n with suppress(ImportError):\n from pandas import NA\n return x is NA\n return False", + "docstring": "Test if x is pandas.NA. We intentionally do not use this function to return for in , because estimators that support are the exception rather than the rule at the moment. When is more universally supported, we may reconsider this decision. Parameters ---------- x : any type Returns ------- boolean", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_missing.py", + "ast_data": "FunctionDef name:is_pandas_na arg:x arguments arg With Call Return return:yes Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_op_consumers", + "source_code": "def get_op_consumers(self, src_op_name):\n return self._op_consumers[src_op_name]", + "docstring": "Get all the downstream consumers of this op. Only data (non-control) edges are tracked. Args: src_op_name: Name of the op providing the tensor being consumed. Returns: A list of (src_slot, dst_op_name, dst_slot) tuples. In each item of the list: src_slot: 0-based output slot of the op of which the output tensor is being consumed. 
dst_op_name: Name of the consuming op (e.g., \"Conv2D_3/BiasAdd\") dst_slot: 0-based input slot of the consuming op that receives the tensor from this op.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:get_op_consumers arg:self arg:src_op_name arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "resize_tensor_input", + "source_code": "def resize_tensor_input(self, input_index, tensor_size, strict=False):\n self._ensure_safe()\n tensor_size = np.array(tensor_size, dtype=np.int32)\n self._interpreter.ResizeInputTensor(input_index, tensor_size, strict)", + "docstring": "Resizes an input tensor. Args: input_index: Tensor index of input to set. This value can be gotten from the 'index' field in get_input_details. tensor_size: The tensor_shape to resize the input to. strict: Only unknown dimensions can be resized when is True. Unknown dimensions are indicated as in the attribute of a given tensor. (default False) Raises: ValueError: If the interpreter could not resize the input tensor. Usage:", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py", + "ast_data": "FunctionDef name:resize_tensor_input arg:self arg:input_index arg:tensor_size arg:strict arguments arg arg arg arg Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "is_venv", + "source_code": "def is_venv(self) -> bool:\n return self.prefix.is_dir() and (self.prefix / 'pyvenv.cfg').is_file()", + "docstring": "Check if the prefix is a virtual environment.", + "type": "method", + "file_path": "pytorch\\tools\\nightly.py", + "ast_data": "FunctionDef name:is_venv arg:self arguments arg Return return:yes BoolOp Call Call" + }, + { + "library": "matplotlib", + "name": "ToolGrid", + "source_code": "class ToolGrid(ToolBase):\n description = 'Toggle major grids'\n default_keymap = property(lambda self: mpl.rcParams['keymap.grid'])\n\n def trigger(self, sender, event, data=None):\n sentinel = str(uuid.uuid4())\n with cbook._setattr_cm(event, key=sentinel), mpl.rc_context({'keymap.grid': sentinel}):\n mpl.backend_bases.key_press_handler(event, self.figure.canvas)", + "docstring": "Tool to toggle the major grids of the figure.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "ClassDef name:ToolGrid Assign Assign Call arguments arg FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg Assign Call Call With Call Call Call" + }, + { + "library": "matplotlib", + "name": "_draw_all", + "source_code": "def _draw_all(self):\n if self.orientation == 'vertical':\n if mpl.rcParams['ytick.minor.visible']:\n self.minorticks_on()\n elif mpl.rcParams['xtick.minor.visible']:\n self.minorticks_on()\n self.long_axis.set(label_position=self.ticklocation, ticks_position=self.ticklocation)\n self._short_axis().set_ticks([])\n self._short_axis().set_ticks([], minor=True)\n self._process_values()\n self.vmin, self.vmax = self._boundaries[self._inside][[0, -1]]\n X, Y = self._mesh()\n self._do_extends()\n lower, upper = (self.vmin, self.vmax)\n if self.long_axis.get_inverted():\n lower, upper = (upper, lower)\n if self.orientation == 'vertical':\n self.ax.set_xlim(0, 1)\n self.ax.set_ylim(lower, upper)\n else:\n self.ax.set_ylim(0, 1)\n self.ax.set_xlim(lower, upper)\n self.update_ticks()\n if self._filled:\n ind = np.arange(len(self._values))\n if self._extend_lower():\n ind = ind[1:]\n if self._extend_upper():\n 
ind = ind[:-1]\n self._add_solids(X, Y, self._values[ind, np.newaxis])", + "docstring": "Calculate any free parameters based on the current cmap and norm, and do all the drawing.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py", + "ast_data": "FunctionDef name:_draw_all arg:self arguments arg If Compare If Call If Call Call Call Call Call Call Call Assign Assign Call Call Assign If Call Assign If Compare Call Call Call Call Call If Assign Call Call If Call Assign If Call Assign Call" + }, + { + "library": "scipy", + "name": "min_distance_point", + "source_code": "def min_distance_point(self, x, p=2.0):\n return minkowski_distance(0, np.maximum(0, np.maximum(self.mins - x, x - self.maxes)), p)", + "docstring": "Return the minimum distance between input and points in the hyperrectangle. Parameters ---------- x : array_like Input. p : float, optional Input.", + "type": "method", + "file_path": "scipy\\scipy\\spatial\\_kdtree.py", + "ast_data": "FunctionDef name:min_distance_point arg:self arg:x arg:p arguments arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "initialize", + "source_code": "@tf_export(v1=['summary.initialize'])\ndef initialize(graph=None, session=None):\n if context.executing_eagerly():\n return\n if _summary_state.writer is None:\n raise RuntimeError('No default tf.contrib.summary.SummaryWriter found')\n if session is None:\n session = ops.get_default_session()\n if session is None:\n raise ValueError('Argument `session must be passed if no default session exists')\n session.run(summary_writer_initializer_op())\n if graph is not None:\n data = _serialize_graph(graph)\n x = array_ops.placeholder(dtypes.string)\n session.run(graph_v1(x, 0), feed_dict={x: data})", + "docstring": "Initializes summary writing for graph execution mode. This operation is a no-op when executing eagerly. This helper method provides a higher-level alternative to using and . Most users will also want to call which can happen before or after this function is called. Args: graph: A or to output to the writer. This function will not write the default graph by default. When writing to an event log file, the associated step will be zero. session: So this method can call . This defaults to . Raises: RuntimeError: If the current thread has no default . 
ValueError: If session wasn't passed and no default session.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:initialize arg:graph arg:session arguments arg arg If Call Return return:no If Compare Raise Call If Compare Assign Call If Compare Raise Call Call Call If Compare Assign Call Assign Call Call Call Call" + }, + { + "library": "pytorch", + "name": "float", + "source_code": "def float(self):\n _warn_typed_storage_removal()\n return self._to(torch.float)", + "docstring": "Casts this storage to float type.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:float arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_raw_predict", + "source_code": "def _raw_predict(self, X, n_threads=None):\n check_is_fitted(self)\n is_binned = getattr(self, '_in_fit', False)\n if not is_binned:\n X = self._preprocess_X(X, reset=False)\n n_samples = X.shape[0]\n raw_predictions = np.zeros(shape=(n_samples, self.n_trees_per_iteration_), dtype=self._baseline_prediction.dtype, order='F')\n raw_predictions += self._baseline_prediction\n n_threads = _openmp_effective_n_threads(n_threads)\n self._predict_iterations(X, self._predictors, raw_predictions, is_binned, n_threads)\n return raw_predictions", + "docstring": "Return the sum of the leaves values over all predictors. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. n_threads : int, default=None Number of OpenMP threads to use. is called to determine the effective number of threads use, which takes cgroups CPU quotes into account. See the docstring of for details. Returns ------- raw_predictions : array, shape (n_samples, n_trees_per_iteration) The raw predicted values.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", + "ast_data": "FunctionDef name:_raw_predict arg:self arg:X arg:n_threads arguments arg arg arg Call Assign Call If Assign Call Assign Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "symbolic_relations", + "source_code": "def symbolic_relations(self):\n graph = self.traced.graph\n for n in graph.nodes:\n self.infer_symbolic_relations(n)\n return True", + "docstring": "Infers algebraic relations", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py", + "ast_data": "FunctionDef name:symbolic_relations arg:self arguments arg Assign For Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "update_from", + "source_code": "def update_from(self, other):\n self._transform = other._transform\n self._transformSet = other._transformSet\n self._visible = other._visible\n self._alpha = other._alpha\n self.clipbox = other.clipbox\n self._clipon = other._clipon\n self._clippath = other._clippath\n self._label = other._label\n self._sketch = other._sketch\n self._path_effects = other._path_effects\n self.sticky_edges.x[:] = other.sticky_edges.x.copy()\n self.sticky_edges.y[:] = other.sticky_edges.y.copy()\n self.pchanged()\n self.stale = True", + "docstring": "Copy properties from *other* to *self*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:update_from arg:self arg:other arguments arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Assign Call Call Assign" + }, + { + 
"library": "pytorch", + "name": "generate_detector_report", + "source_code": "@abstractmethod\ndef generate_detector_report(self, model) -> tuple[str, dict[str, Any]]:\n pass", + "docstring": "Args model (nn.Module or subclass): model to find observer insertion points Returns a Tuple of two elements: Str: string report of the suggested improvements Dict: contains useful data collected by the observer pertinent to this report", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", + "ast_data": "FunctionDef name:generate_detector_report arg:self arg:model arguments arg arg" + }, + { + "library": "pytorch", + "name": "replace_math_functions", + "source_code": "def replace_math_functions(input_string):\n output_string = input_string\n for func in MATH_TRANSPILATIONS:\n output_string = output_string.replace(f'{func}(', f'{MATH_TRANSPILATIONS[func]}(')\n return output_string", + "docstring": "FIXME: Temporarily replace std:: invocations of math functions with non-std:: versions to prevent linker errors NOTE: This can lead to correctness issues when running tests, since the correct version of the math function (exp/expf) might not get called. Plan is to remove this function once HIP supports std:: math function calls inside device code", + "type": "function", + "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py", + "ast_data": "FunctionDef name:replace_math_functions arg:input_string arguments arg Assign For Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_all_gather_flat_param", + "source_code": "def _all_gather_flat_param(self, padded_unsharded_flat_param: Tensor) -> Tensor:\n _p_assert(hasattr(self, 'process_group') and hasattr(self, 'world_size'), 'Expects a process group and world size to have been set via `shard()`')\n sharded_flat_param = self.flat_param.data\n expected_numel = sharded_flat_param.numel() * self.world_size\n _p_assert(padded_unsharded_flat_param.numel() == expected_numel, f'Expects {expected_numel} numel but got {padded_unsharded_flat_param.numel()}')\n pg = self._fake_process_group if self._use_fake_all_gather else self.process_group\n if sharded_flat_param.is_cpu:\n tensor_list = list(torch.chunk(padded_unsharded_flat_param, dist.get_world_size(pg)))\n dist.all_gather(tensor_list, sharded_flat_param, group=pg)\n else:\n dist.all_gather_into_tensor(padded_unsharded_flat_param, sharded_flat_param, pg)\n if self._offload_params:\n _no_dispatch_record_stream(sharded_flat_param, self._device_handle.current_stream())\n return padded_unsharded_flat_param", + "docstring": "All-gather the handle's flat parameter to the destination ``. 
Then switch to use the all-gathered tensor.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", + "ast_data": "FunctionDef name:_all_gather_flat_param arg:self arg:padded_unsharded_flat_param arguments arg arg Call BoolOp Call Call Assign Assign Call Call Compare Call Call Assign If Assign Call Call Call Call Call If Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "dim", + "source_code": "def dim(self) -> int:\n return self.direction.shape[-1]", + "docstring": "Return the dimension in which the line holds.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\line.py", + "ast_data": "FunctionDef name:dim arg:self arguments arg Return return:yes" + }, + { + "library": "authlib", + "name": "InvalidGrantError", + "source_code": "class InvalidGrantError(OAuth2Error):\n error = 'invalid_grant'", + "docstring": "The provided authorization grant (e.g., authorization code, resource owner credentials) or refresh token is invalid, expired, revoked, does not match the redirection URI used in the authorization request, or was issued to another client.", + "type": "class", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\errors.py", + "ast_data": "ClassDef name:InvalidGrantError Assign" + }, + { + "library": "pytorch", + "name": "CLOSURE_MATCH", + "source_code": "def CLOSURE_MATCH(self, guard: Guard):\n if self.serialization_mode == 'save':\n raise RuntimeError('CLOSURE_MATCH guard cannot be serialized.')\n val = self.get(guard.name)\n if type(val) == types.FunctionType and hasattr(val, '__code__'):\n self._guard_on_attribute(guard, '__code__', GuardBuilder.HASATTR)\n self._guard_on_attribute(guard, '__code__', GuardBuilder.FUNCTION_MATCH)\n else:\n self.FUNCTION_MATCH(guard)", + "docstring": "matches a closure by __code__ id.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\guards.py", + "ast_data": "FunctionDef name:CLOSURE_MATCH arg:self arg:guard arguments arg arg If Compare Raise Call Assign Call If BoolOp Compare Call Call Call Call Call" + }, + { + "library": "scipy", + "name": "_add_reduced_axes", + "source_code": "def _add_reduced_axes(res, reduced_axes, keepdims):\n return [np.expand_dims(output, reduced_axes) if not isinstance(output, int) else output for output in res] if keepdims else res", + "docstring": "Add reduced axes back to all the arrays in the result object if keepdims = True.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_axis_nan_policy.py", + "ast_data": "FunctionDef name:_add_reduced_axes arg:res arg:reduced_axes arg:keepdims arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "decoder", + "source_code": "def decoder(conv_func):\n return lambda s: conv_func(s.decode())", + "docstring": "Convert bytestrings from Python's sqlite3 interface to a regular string.", + "type": "function", + "file_path": "django\\django\\db\\backends\\sqlite3\\base.py", + "ast_data": "FunctionDef name:decoder arg:conv_func arguments arg Return return:yes arguments arg Call Call" + }, + { + "library": "seaborn", + "name": "_show_cmap", + "source_code": "def _show_cmap(cmap):\n from .rcmod import axes_style\n with axes_style('white'):\n f, ax = plt.subplots(figsize=(8.25, 0.75))\n ax.set(xticks=[], yticks=[])\n x = np.linspace(0, 1, 256)[np.newaxis, :]\n ax.pcolormesh(x, cmap=cmap)", + "docstring": "Show a continuous matplotlib colormap.", + "type": "function", + "file_path": "seaborn\\seaborn\\widgets.py", + "ast_data": "FunctionDef name:_show_cmap arg:cmap 
arguments arg With Call Assign Call Call Assign Call Call" + }, + { + "library": "matplotlib", + "name": "_parse_composites", + "source_code": "def _parse_composites(fh):\n composites = {}\n for line in fh:\n line = line.rstrip()\n if not line:\n continue\n if line.startswith(b'EndComposites'):\n return composites\n vals = line.split(b';')\n cc = vals[0].split()\n name, _num_parts = (cc[1], _to_int(cc[2]))\n pccParts = []\n for s in vals[1:-1]:\n pcc = s.split()\n part = CompositePart(pcc[1], _to_float(pcc[2]), _to_float(pcc[3]))\n pccParts.append(part)\n composites[name] = pccParts\n raise RuntimeError('Bad composites parse')", + "docstring": "Parse the given filehandle for composites information return them as a dict. It is assumed that the file cursor is on the line behind 'StartComposites'. Returns ------- dict A dict mapping composite character names to a parts list. The parts list is a list of entries describing the parts of the composite. Examples -------- A composite definition line:: CC Aacute 2 ; PCC A 0 0 ; PCC acute 160 170 ; will be represented as:: composites['Aacute'] = [CompositePart(name='A', dx=0, dy=0), CompositePart(name='acute', dx=160, dy=170)]", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\_afm.py", + "ast_data": "FunctionDef name:_parse_composites arg:fh arguments arg Assign For Assign Call If If Call Return return:yes Assign Call Assign Call Assign Call Assign For Assign Call Assign Call Call Call Call Assign Raise Call" + }, + { + "library": "sphinx", + "name": "build_update", + "source_code": "@final\ndef build_update(self) -> None:\n self.compile_update_catalogs()\n to_build = self.get_outdated_docs()\n if isinstance(to_build, str):\n self.build(['__all__'], summary=to_build, method='update')\n else:\n to_build = set(to_build)\n self.build(to_build, summary=__('targets for %d source files that are out of date') % len(to_build), method='update')", + "docstring": "Only rebuild what was changed or added since last build.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\__init__.py", + "ast_data": "FunctionDef name:build_update arg:self arguments arg Call Assign Call If Call Call Assign Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "get_snap", + "source_code": "def get_snap(self):\n return self._snap", + "docstring": "Return the snap setting, which can be: * True: snap vertices to the nearest pixel center * False: leave vertices as-is * None: (auto) If the path contains only rectilinear line segments, round to the nearest pixel center", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:get_snap arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "ReentrancyLock", + "source_code": "class ReentrancyLock:\n\n def __init__(self, err_msg):\n self._rlock = threading.RLock()\n self._entered = False\n self._err_msg = err_msg\n\n def __enter__(self):\n self._rlock.acquire()\n if self._entered:\n self._rlock.release()\n raise ReentrancyError(self._err_msg)\n self._entered = True\n\n def __exit__(self, type, value, traceback):\n self._entered = False\n self._rlock.release()\n\n def decorate(self, func):\n\n def caller(func, *a, **kw):\n with self:\n return func(*a, **kw)\n return scipy._lib.decorator.decorate(func, caller)", + "docstring": "Threading lock that raises an exception for reentrant calls. Calls from different threads are serialized, and nested calls from the same thread result to an error. 
The object can be used as a context manager or to decorate functions via the decorate() method.", + "type": "class", + "file_path": "scipy\\scipy\\_lib\\_threadsafety.py", + "ast_data": "ClassDef name:ReentrancyLock FunctionDef name:__init__ arg:self arg:err_msg arguments arg arg Assign Call Assign Assign FunctionDef name:__enter__ arg:self arguments arg Call If Call Raise Call Assign FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg Assign Call FunctionDef name:decorate arg:self arg:func arguments arg arg FunctionDef name:caller arg:func arguments arg arg arg With Return return:yes Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "GenericObject", + "source_code": "class GenericObject(ObjectDescription[str]):\n indextemplate: str = ''\n parse_node: Callable[[BuildEnvironment, str, desc_signature], str] | None = None\n\n def handle_signature(self, sig: str, signode: desc_signature) -> str:\n if self.parse_node:\n name = self.parse_node(self.env, sig, signode)\n else:\n signode.clear()\n signode += addnodes.desc_name(sig, sig)\n name = ws_re.sub(' ', sig)\n return name\n\n def add_target_and_index(self, name: str, sig: str, signode: desc_signature) -> None:\n node_id = make_id(self.env, self.state.document, self.objtype, name)\n signode['ids'].append(node_id)\n self.state.document.note_explicit_target(signode)\n if self.indextemplate:\n colon = self.indextemplate.find(':')\n if colon != -1:\n indextype = self.indextemplate[:colon].strip()\n indexentry = self.indextemplate[colon + 1:].strip() % (name,)\n else:\n indextype = 'single'\n indexentry = self.indextemplate % (name,)\n self.indexnode['entries'].append((indextype, indexentry, node_id, '', None))\n std = self.env.domains.standard_domain\n std.note_object(self.objtype, name, node_id, location=signode)", + "docstring": "A generic x-ref directive registered with Sphinx.add_object_type().", + "type": "class", + "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py", + "ast_data": "ClassDef name:GenericObject FunctionDef name:handle_signature arg:self arg:sig arg:signode arguments arg arg arg If Assign Call Call Call Assign Call Return return:yes FunctionDef name:add_target_and_index arg:self arg:name arg:sig arg:signode arguments arg arg arg arg Assign Call Call Call If Assign Call If Compare Assign Call Assign Call Assign Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "precompute_method", + "source_code": "def precompute_method(obj: Any, method: str) -> None:\n result = getattr(obj, method)()\n setattr(obj, method, lambda: result)", + "docstring": "Replace obj.method() with a new method that returns a precomputed constant.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\utils.py", + "ast_data": "FunctionDef name:precompute_method arg:obj arg:method arguments arg arg Assign Call Call Call arguments" + }, + { + "library": "scipy", + "name": "_compute_dminus", + "source_code": "def _compute_dminus(cdfvals, x):\n n = len(cdfvals)\n dminus = cdfvals - np.arange(0.0, n) / n\n amax = dminus.argmax()\n loc_max = x[amax]\n return (dminus[amax], loc_max)", + "docstring": "Computes D- as used in the Kolmogorov-Smirnov test. 
Parameters ---------- cdfvals : array_like Sorted array of CDF values between 0 and 1 x: array_like Sorted array of the stochastic variable itself Returns ------- res: Pair with the following elements: - Maximum distance of the CDF values above Uniform(0, 1) - The location at which the maximum is reached.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:_compute_dminus arg:cdfvals arg:x arguments arg arg Assign Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "unpack_float4x2_as_uint8", + "source_code": "def unpack_float4x2_as_uint8(tensor: torch.Tensor) -> np.ndarray:\n assert tensor.dtype == torch.float4_e2m1fn_x2\n data = tensor.view(torch.uint8).numpy(force=True).flatten()\n result_size = tensor.numel() * 2\n result = np.empty([result_size], dtype=np.uint8)\n array_low = data & np.uint8(15)\n array_high = data & np.uint8(240)\n array_high >>= np.uint8(4)\n result[0::2] = array_low\n result[1::2] = array_high\n result.resize(get_float4_shape(tensor), refcheck=False)\n return result", + "docstring": "Convert a float4x2 tensor to unpacked uint8 np array.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_type_casting.py", + "ast_data": "FunctionDef name:unpack_float4x2_as_uint8 arg:tensor arguments arg Compare Assign Call Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "configure_collective_ops", + "source_code": "def configure_collective_ops(self, collective_leader='', scoped_allocator_enabled_ops=('CollectiveReduce',), use_nccl_communication=False, device_filters=None):\n if self._collective_leader is not None:\n if self._collective_leader != collective_leader or self._collective_scoped_allocator_enabled_ops != scoped_allocator_enabled_ops or self._collective_use_nccl_communication != use_nccl_communication or (self._collective_device_filters != device_filters):\n raise ValueError('Collective ops are already configured.')\n else:\n return\n if self._context_handle is not None:\n raise RuntimeError('Collective ops must be configured at program startup')\n self._collective_leader = collective_leader\n self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops\n self._collective_use_nccl_communication = use_nccl_communication\n self._collective_device_filters = device_filters", + "docstring": "Configure collective ops. Collective group leader is necessary for collective ops to run, other configurations are mainly for the purpose of performance. Args: collective_leader: a device string for collective leader, e.g. \"/job:worker/replica:0/task:0\"; empty string means local execution of collective ops. scoped_allocator_enabled_ops: a tuple or a list of op names for scoped allocator to run with. use_nccl_communication: whether to use nccl communication for collective ops. device_filters: a tuple or a list of device strings. If set, corresponding task can only see the devices filtered by these device filters. 
Raises: RuntimeError: if this method is not called at program startup.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:configure_collective_ops arg:self arg:collective_leader arg:scoped_allocator_enabled_ops arg:use_nccl_communication arg:device_filters arguments arg arg arg arg arg If Compare If BoolOp Compare Compare Compare Compare Raise Call Return return:no If Compare Raise Call Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, export_dir):\n self._export_dir = export_dir\n self._variables_path = path_helpers.get_variables_path(export_dir)\n self._saved_model = parse_saved_model(export_dir)", + "docstring": "Creates a . Args: export_dir: Directory in which the SavedModel protocol buffer and variables to be loaded are located.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:export_dir arguments arg arg Assign Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "_copy_some_through", + "source_code": "def _copy_some_through(current, candidate):\n\n def copy_fn(cur_i, cand_i):\n if isinstance(cur_i, tensor_array_ops.TensorArray):\n return cand_i\n if cur_i.shape.rank == 0:\n return cand_i\n with ops.colocate_with(cand_i):\n return array_ops.where(elements_finished, cur_i, cand_i)\n return nest.map_structure(copy_fn, current, candidate)", + "docstring": "Copy some tensors through via array_ops.where.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn.py", + "ast_data": "FunctionDef name:_copy_some_through arg:current arg:candidate arguments arg arg FunctionDef name:copy_fn arg:cur_i arg:cand_i arguments arg arg If Call Return return:yes If Compare Return return:yes With Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_Read", + "source_code": "def _Read(self, input_file, schema, raw_binary=False):\n raw_binary = ['--raw-binary'] if raw_binary else []\n with TemporaryDirectoryResource() as tempdir:\n basename = os.path.basename(input_file)\n basename_no_extension, extension = os.path.splitext(basename)\n if extension in ['.bin', '.tflite']:\n returncode = subprocess.call([self._flatc_path, '-t', '--strict-json', '--defaults-json'] + raw_binary + ['-o', tempdir, schema, '--', input_file])\n if returncode != 0:\n raise RuntimeError('flatc failed to convert from binary to json.')\n json_file = os.path.join(tempdir, basename_no_extension + '.json')\n if not os.path.exists(json_file):\n raise RuntimeError('Could not find %r' % json_file)\n elif extension == '.json':\n json_file = input_file\n else:\n raise ValueError('Invalid extension on input file %r' % input_file)\n return json.load(open(json_file))", + "docstring": "Read a tflite model assuming the given flatbuffer schema. If is in bin, then we must use flatc to convert the schema from binary to json. Args: input_file: a binary (flatbuffer) or json file to read from. Extension must be , , or for FlatBuffer Binary or FlatBuffer JSON. schema: which schema to use for reading raw_binary: whether to assume raw_binary (versions previous to v3) that lacked file_identifier require this. Raises: RuntimeError: 1. When flatc cannot be invoked. 2. When json file does not exists. ValueError: When the extension is not json or bin. 
Returns: A dictionary representing the read tflite model.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\schema\\upgrade_schema.py", + "ast_data": "FunctionDef name:_Read arg:self arg:input_file arg:schema arg:raw_binary arguments arg arg arg arg Assign With Call Assign Call Assign Call If Compare Assign Call If Compare Raise Call Assign Call If Call Raise Call If Compare Assign Raise Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "GraphExecutionTraceDigest", + "source_code": "class GraphExecutionTraceDigest(BaseDigest):\n\n def __init__(self, wall_time, locator, op_type, op_name, output_slot, graph_id):\n super().__init__(wall_time, locator)\n self._op_type = op_type\n self._op_name = op_name\n self._output_slot = output_slot\n self._graph_id = graph_id\n\n @property\n def op_type(self):\n return self._op_type\n\n @property\n def op_name(self):\n return self._op_name\n\n @property\n def output_slot(self):\n return self._output_slot\n\n @property\n def graph_id(self):\n return self._graph_id\n\n def to_json(self):\n output = super().to_json()\n output.update({'op_type': self.op_type, 'op_name': self.op_name, 'output_slot': self.output_slot, 'graph_id': self.graph_id})\n return output", + "docstring": "Light-weight summary of a intra-graph tensor execution event. Use on this object to read more detailed data (). Properties (beyond the base class): op_type: Type name of the executed op (e.g., \"Conv2D\"). op_name: Name of the op (e.g., \"conv_2d_3/Conv2D\"). output_slot: Output slot index of the tensor. graph_id: The debugger-generated ID of the innermost (immediately-enclosing) graph.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "ClassDef name:GraphExecutionTraceDigest FunctionDef name:__init__ arg:self arg:wall_time arg:locator arg:op_type arg:op_name arg:output_slot arg:graph_id arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:op_type arg:self arguments arg Return return:yes FunctionDef name:op_name arg:self arguments arg Return return:yes FunctionDef name:output_slot arg:self arguments arg Return return:yes FunctionDef name:graph_id arg:self arguments arg Return return:yes FunctionDef name:to_json arg:self arguments arg Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "scale", + "source_code": "@property\ndef scale(self):\n return self._scale", + "docstring": "Scaling factors of these Student's t distribution(s).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\student_t.py", + "ast_data": "FunctionDef name:scale arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "conv3d_transpose", + "source_code": "def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1), padding='valid', data_format=None):\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n if isinstance(output_shape, (tuple, list)):\n output_shape = array_ops_stack.stack(output_shape)\n x, tf_data_format = _preprocess_conv3d_input(x, data_format)\n if data_format == 'channels_first' and tf_data_format == 'NDHWC':\n output_shape = (output_shape[0], output_shape[2], output_shape[3], output_shape[4], output_shape[1])\n if output_shape[0] is None:\n output_shape = (array_ops.shape(x)[0],) + 
tuple(output_shape[1:])\n output_shape = array_ops_stack.stack(list(output_shape))\n padding = _preprocess_padding(padding)\n if tf_data_format == 'NDHWC':\n strides = (1,) + strides + (1,)\n else:\n strides = (1, 1) + strides\n x = nn.conv3d_transpose(x, kernel, output_shape, strides, padding=padding, data_format=tf_data_format)\n if data_format == 'channels_first' and tf_data_format == 'NDHWC':\n x = array_ops.transpose(x, (0, 4, 1, 2, 3))\n return x", + "docstring": "3D deconvolution (i.e. transposed convolution). Args: x: input tensor. kernel: kernel tensor. output_shape: 1D int tensor for the output shape. strides: strides tuple. padding: string, \"same\" or \"valid\". data_format: string, or . Returns: A tensor, result of transposed 3D convolution. Raises: ValueError: if is neither or .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:conv3d_transpose arg:x arg:kernel arg:output_shape arg:strides arg:padding arg:data_format arguments arg arg arg arg arg arg If Compare Assign Call If Compare Raise Call Call If Call Assign Call Assign Call If BoolOp Compare Compare Assign If Compare Assign Call Call Assign Call Call Assign Call If Compare Assign Assign Assign Call If BoolOp Compare Compare Assign Call Return return:yes" + }, + { + "library": "django", + "name": "_check_diff", + "source_code": "def _check_diff(cat_name, base_path):\n po_path = '%(path)s/en/LC_MESSAGES/django%(ext)s.po' % {'path': base_path, 'ext': 'js' if cat_name.endswith('-js') else ''}\n p = run(\"git diff -U0 %s | egrep '^[-+]msgid' | wc -l\" % po_path, capture_output=True, shell=True)\n num_changes = int(p.stdout.strip())\n print(\"%d changed/added messages in '%s' catalog.\" % (num_changes, cat_name))", + "docstring": "Output the approximate number of changed/added strings in the en catalog.", + "type": "function", + "file_path": "django\\scripts\\manage_translations.py", + "ast_data": "FunctionDef name:_check_diff arg:cat_name arg:base_path arguments arg arg Assign Call Assign Call Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "_decode_helper", + "source_code": "def _decode_helper(obj):\n if isinstance(obj, dict) and 'class_name' in obj:\n if obj['class_name'] == 'TensorShape':\n return tensor_shape.TensorShape(obj['items'])\n elif obj['class_name'] == 'TypeSpec':\n return type_spec_registry.lookup(obj['type_spec'])._deserialize(_decode_helper(obj['serialized']))\n elif obj['class_name'] == '__tuple__':\n return tuple((_decode_helper(i) for i in obj['items']))\n elif obj['class_name'] == '__ellipsis__':\n return Ellipsis\n return obj", + "docstring": "A decoding helper that is TF-object aware.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\json_utils.py", + "ast_data": "FunctionDef name:_decode_helper arg:obj arguments arg If BoolOp Call Compare If Compare Return return:yes Call If Compare Return return:yes Call Call Call If Compare Return return:yes Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "l1_unstructured", + "source_code": "def l1_unstructured(module, name, amount, importance_scores=None):\n L1Unstructured.apply(module, name, amount=amount, importance_scores=importance_scores)\n return module", + "docstring": "Prune tensor by removing units with the lowest L1-norm. Prunes tensor corresponding to parameter called `amount`, it represents the absolute number of parameters to prune. 
importance_scores (torch.Tensor): tensor of importance scores (of same shape as module parameter) used to compute mask for pruning. The values in this tensor indicate the importance of the corresponding elements in the parameter being pruned. If unspecified or None, the module parameter will be used in its place. Returns: module (nn.Module): modified (i.e. pruned) version of the input module Examples: >>> # xdoctest: +SKIP >>> m = prune.l1_unstructured(nn.Linear(2, 3), 'weight', amount=0.2) >>> m.state_dict().keys() odict_keys(['bias', 'weight_orig', 'weight_mask'])", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\prune.py", + "ast_data": "FunctionDef name:l1_unstructured arg:module arg:name arg:amount arg:importance_scores arguments arg arg arg arg Call Return return:yes" + }, + { + "library": "django", + "name": "write", + "source_code": "def write(self, outfile, encoding):\n raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')", + "docstring": "Output the feed in the given encoding to outfile, which is a file-like object. Subclasses should override this.", + "type": "method", + "file_path": "django\\django\\utils\\feedgenerator.py", + "ast_data": "FunctionDef name:write arg:self arg:outfile arg:encoding arguments arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "list_to_tuple", + "source_code": "@tf_export('__internal__.nest.list_to_tuple', v1=[])\ndef list_to_tuple(structure):\n\n def sequence_fn(instance, args):\n if isinstance(instance, list):\n return tuple(args)\n return nest_util.sequence_like(instance, args)\n return nest_util.pack_sequence_as(nest_util.Modality.CORE, structure, flatten(structure), False, sequence_fn=sequence_fn)", + "docstring": "Replace all lists with tuples. The fork of nest that tf.data uses treats lists as atoms, while tf.nest treats them as structures to recurse into. Keras has chosen to adopt the latter convention, and must therefore deeply replace all lists with tuples before passing structures to Dataset.from_generator. Args: structure: A nested structure to be remapped. 
Returns: structure mapped to replace all lists with tuples.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py", + "ast_data": "FunctionDef name:list_to_tuple arg:structure arguments arg FunctionDef name:sequence_fn arg:instance arg:args arguments arg arg If Call Return return:yes Call Return return:yes Call Return return:yes Call Call Call" + }, + { + "library": "kornia", + "name": "image_size", + "source_code": "@property\ndef image_size(self) -> ImageSize:\n return self._image_size", + "docstring": "Returns the image size of the camera model.", + "type": "method", + "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py", + "ast_data": "FunctionDef name:image_size arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "poisson_nll_loss", + "source_code": "@elementwise_type_promotion_wrapper(type_promoting_args=('input', 'target'), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)\ndef poisson_nll_loss(input: TensorLikeType, target: TensorLikeType, log_input: bool=True, full: bool=False, size_average: Optional[bool]=None, eps: float=1e-08, reduce: Optional[bool]=None, reduction: str='mean') -> TensorLikeType:\n if size_average is not None or reduce is not None:\n reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)\n _check_reduction_value(reduction)\n if log_input:\n loss = torch.exp(input) - target * input\n else:\n loss = input - target * torch.log(input + eps)\n if full:\n stirling_term = target * torch.log(target) - target + 0.5 * torch.log(2 * torch.pi * target)\n loss = loss + stirling_term.masked_fill(target <= 1, 0)\n return _apply_loss_reduction(loss, reduction)", + "docstring": "Reference implementation of torch.nn.functional.poisson_nll_loss", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py", + "ast_data": "FunctionDef name:poisson_nll_loss arg:input arg:target arg:log_input arg:full arg:size_average arg:eps arg:reduce arg:reduction arguments arg arg arg arg arg arg arg arg If BoolOp Compare Compare Assign Call Call If Assign Call Assign Call If Assign Call Call Assign Call Compare Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "uniform_", + "source_code": "@_sharded_op_impl(torch.nn.init.uniform_)\ndef uniform_(types, args=(), kwargs=None, pg=None):\n validate_param(kwargs, 'kwargs')\n sharded_tensor = kwargs['tensor']\n validate_param(sharded_tensor, 'tensor')\n a = kwargs['a']\n validate_param(a, 'a')\n b = kwargs['b']\n validate_param(b, 'b')\n for shard in sharded_tensor.local_shards():\n torch.nn.init.uniform_(shard.tensor, a=a, b=b)\n return sharded_tensor", + "docstring": "Fills the Tensor in tensor.local_shards with values drawn from the uniform distribution :math:. 
Args: tensor: tensor sharded across devices a: the lower bound of the uniform distribution b: the upper bound of the uniform distribution", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\_ops\\init.py", + "ast_data": "FunctionDef name:uniform_ arg:types arg:args arg:kwargs arg:pg arguments arg arg arg arg Call Assign Call Assign Call Assign Call For Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "round", + "source_code": "@tf_export('math.round', 'round')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef round(x, name=None):\n x = ops.convert_to_tensor(x, name='x')\n if x.dtype.is_integer:\n return x\n else:\n return gen_math_ops.round(x, name=name)", + "docstring": "Rounds the values of a tensor to the nearest integer, element-wise. Rounds half to even. Also known as bankers rounding. If you want to round according to the current system rounding mode use tf::cint. For example: Args: x: A of type , , , , or . name: A name for the operation (optional). Returns: A of same shape and type as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:round arg:x arg:name arguments arg arg Assign Call If Return return:yes Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "MaxWidth", + "source_code": "class MaxWidth(MaxExtent):\n\n def __init__(self, artist_list):\n super().__init__(artist_list, 'width')", + "docstring": "Size whose absolute part is the largest width of the given *artist_list*.", + "type": "class", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py", + "ast_data": "ClassDef name:MaxWidth FunctionDef name:__init__ arg:self arg:artist_list arguments arg arg Call Call" + }, + { + "library": "numpy", + "name": "hermmulx", + "source_code": "def hermmulx(c):\n [c] = pu.as_series([c])\n if len(c) == 1 and c[0] == 0:\n return c\n prd = np.empty(len(c) + 1, dtype=c.dtype)\n prd[0] = c[0] * 0\n prd[1] = c[0] / 2\n for i in range(1, len(c)):\n prd[i + 1] = c[i] / 2\n prd[i - 1] += c[i] * i\n return prd", + "docstring": "Multiply a Hermite series by x. Multiply the Hermite series by x, where x is the independent variable. Parameters ---------- c : array_like 1-D array of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the result of the multiplication. See Also -------- hermadd, hermsub, hermmul, hermdiv, hermpow Notes ----- The multiplication uses the recursion relationship for Hermite polynomials in the form .. math:: xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x)) Examples -------- >>> from numpy.polynomial.hermite import hermmulx >>> hermmulx([1, 2, 3]) array([2. , 6.5, 1. , 1.5])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite.py", + "ast_data": "FunctionDef name:hermmulx arg:c arguments arg Assign Call If BoolOp Compare Call Compare Return return:yes Assign Call Call Assign Assign For Call Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "combine_kwargs", + "source_code": "def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict:\n if engine_kwargs is None:\n result = {}\n else:\n result = engine_kwargs.copy()\n result.update(kwargs)\n return result", + "docstring": "Used to combine two sources of kwargs for the backend engine. Use of kwargs is deprecated, this function is solely for use in 1.3 and should be removed in 1.4/2.0. 
Also _base.ExcelWriter.__new__ ensures either engine_kwargs or kwargs must be None or empty respectively. Parameters ---------- engine_kwargs: dict kwargs to be passed through to the engine. kwargs: dict kwargs to be psased through to the engine (deprecated) Returns ------- engine_kwargs combined with kwargs", + "type": "function", + "file_path": "pandas\\pandas\\io\\excel\\_util.py", + "ast_data": "FunctionDef name:combine_kwargs arg:engine_kwargs arg:kwargs arguments arg arg If Compare Assign Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_figure", + "source_code": "def get_figure(self, root=None):\n if self._root_figure is self:\n return self\n if self._parent is self._root_figure:\n return self._parent\n if root is None:\n message = 'From Matplotlib 3.12 SubFigure.get_figure will by default return the direct parent figure, which may be a SubFigure. To suppress this warning, pass the root parameter. Pass `True` to maintain the old behavior and `False` to opt-in to the future behavior.'\n _api.warn_deprecated('3.10', message=message)\n root = True\n if root:\n return self._root_figure\n return self._parent", + "docstring": "Return the or instance the (Sub)Figure belongs to. Parameters ---------- root : bool, default=True If False, return the (Sub)Figure this artist is on. If True, return the root Figure for a nested tree of SubFigures. .. deprecated:: 3.10 From version 3.12 *root* will default to False.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:get_figure arg:self arg:root arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare Assign Call Assign If Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "fit_loc_scale", + "source_code": "def fit_loc_scale(self, data, *args):\n mu, mu2 = self.stats(*args, **{'moments': 'mv'})\n tmp = asarray(data)\n muhat = tmp.mean()\n mu2hat = tmp.var()\n Shat = sqrt(mu2hat / mu2)\n with np.errstate(invalid='ignore'):\n Lhat = muhat - Shat * mu\n if not np.isfinite(Lhat):\n Lhat = 0\n if not (np.isfinite(Shat) and 0 < Shat):\n Shat = 1\n return (Lhat, Shat)", + "docstring": "Estimate loc and scale parameters from data using 1st and 2nd moments. Parameters ---------- data : array_like Data to fit. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). Returns ------- Lhat : float Estimated location parameter for the data. Shat : float Estimated scale parameter for the data.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:fit_loc_scale arg:self arg:data arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call With Call Assign If Call Assign If BoolOp Call Compare Assign Return return:yes" + }, + { + "library": "numpy", + "name": "chebmulx", + "source_code": "def chebmulx(c):\n [c] = pu.as_series([c])\n if len(c) == 1 and c[0] == 0:\n return c\n prd = np.empty(len(c) + 1, dtype=c.dtype)\n prd[0] = c[0] * 0\n prd[1] = c[0]\n if len(c) > 1:\n tmp = c[1:] / 2\n prd[2:] = tmp\n prd[0:-2] += tmp\n return prd", + "docstring": "Multiply a Chebyshev series by x. Multiply the polynomial by x, where x is the independent variable. Parameters ---------- c : array_like 1-D array of Chebyshev series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the result of the multiplication. 
See Also -------- chebadd, chebsub, chebmul, chebdiv, chebpow Examples -------- >>> from numpy.polynomial import chebyshev as C >>> C.chebmulx([1,2,3]) array([1. , 2.5, 1. , 1.5])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\chebyshev.py", + "ast_data": "FunctionDef name:chebmulx arg:c arguments arg Assign Call If BoolOp Compare Call Compare Return return:yes Assign Call Call Assign Assign If Compare Call Assign Assign Return return:yes" + }, + { + "library": "kornia", + "name": "worldtocam_to_camtoworld_Rt", + "source_code": "def worldtocam_to_camtoworld_Rt(R: Tensor, t: Tensor) -> tuple[Tensor, Tensor]:\n KORNIA_CHECK_SHAPE(R, ['B', '3', '3'])\n KORNIA_CHECK_SHAPE(t, ['B', '3', '1'])\n R_inv = R.transpose(1, 2)\n new_t: Tensor = -R_inv @ t\n return (R_inv, new_t)", + "docstring": "Convert worldtocam frame used in Colmap to camtoworld. Args: R: Rotation matrix, :math: t: Translation matrix :math:. Returns: Rinv: Rotation matrix, :math: tinv: Translation matrix :math:. Example: >>> R, t = torch.eye(3)[None], torch.ones(3).reshape(1, 3, 1) >>> worldtocam_to_camtoworld_Rt(R, t) (tensor([[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]]), tensor([[[-1.], [-1.], [-1.]]]))", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:worldtocam_to_camtoworld_Rt arg:R arg:t arguments arg arg Call Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "value", + "source_code": "def value(self) -> T:\n return super().value()", + "docstring": "Obtain the value of an already-completed future. This method should only be called after a call to :meth: has completed, or inside a callback function passed to :meth:. In other cases this `waitthen` method will also throw an error.", + "type": "method", + "file_path": "pytorch\\torch\\futures\\__init__.py", + "ast_data": "FunctionDef name:value arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "_embed_boxes", + "source_code": "def _embed_boxes(self, boxes: Tensor) -> Tensor:\n boxes = boxes + 0.5\n coords = boxes.reshape(-1, 2, 2)\n corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)\n corner_embedding[:, 0, :] += self.point_embeddings[2].weight\n corner_embedding[:, 1, :] += self.point_embeddings[3].weight\n return corner_embedding", + "docstring": "Embeds box prompts.", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\prompt_encoder.py", + "ast_data": "FunctionDef name:_embed_boxes arg:self arg:boxes arguments arg arg Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "assert_consumed", + "source_code": "def assert_consumed(self):\n unused_attributes = list(self._checkpoint.unused_attributes.items())\n unused_attributes = [a for a in unused_attributes if all((a[0] is not x for x in self._optionally_restored))]\n if unused_attributes:\n unused_attribute_string = ''.join((f'\\n {obj}: {attributes}' for obj, attributes in unused_attributes))\n raise AssertionError(f'Some objects had attributes which were not restored: {unused_attribute_string}')\n for trackable in util.list_objects(self._object_graph_view):\n trackable._maybe_initialize_trackable()\n if trackable._update_uid < self._checkpoint.restore_uid:\n raise AssertionError(f'Object not restored: {trackable}')\n return self", + "docstring": "Raises an exception if any variables are unmatched.", + "type": "method", + "file_path": 
"tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:assert_consumed arg:self arguments arg Assign Call Call Assign Call Compare If Assign Call Raise Call For Call Call If Compare Raise Call Return return:yes" + }, + { + "library": "kornia", + "name": "params", + "source_code": "@property\ndef params(self) -> Tensor:\n return self._params", + "docstring": "Returns the camera parameters.", + "type": "method", + "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py", + "ast_data": "FunctionDef name:params arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_Restructure", + "source_code": "def _Restructure(l, structure):\n result = []\n current_index = 0\n for element in structure:\n if element is None:\n result.append(l[current_index])\n current_index += 1\n else:\n result.append(l[current_index:current_index + element])\n current_index += element\n if len(result) == 1:\n return result[0]\n else:\n return tuple(result)", + "docstring": "Returns the elements of list l structured according to the given structure. A structure is represented by a list whose elements are either or a non-negative integer. corresponds to a single element in the output list, and an integer N corresponds to a nested list of length N. The function returns a data structure whose shape is given by , and whose elements are taken from . If is a singleton, the function returns the single data structure implied by the 0th element of . For example: _Restructure([\"foo\", \"bar\", \"baz\", \"qux\"], [None, 2, None]) -> [\"foo\", [\"bar\", \"baz\"], \"qux\"] _Restructure([\"foo\"], [None]) -> \"foo\" _Restructure([\"foo\"], [1]) -> [\"foo\"] _Restructure([], [0]) -> [] Args: l: A list. structure: A list whose elements are either or a non-negative integer. Returns: The elements of , restructured according to . If is a list of length 1, this function returns the single data structure implied by .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_library.py", + "ast_data": "FunctionDef name:_Restructure arg:l arg:structure arguments arg arg Assign Assign For If Compare Call Call If Compare Call Return return:yes Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "sparsify", + "source_code": "def sparsify(self):\n msg = 'Estimator, %(name)s, must be fitted before sparsifying.'\n check_is_fitted(self, msg=msg)\n self.coef_ = sp.csr_matrix(self.coef_)\n return self", + "docstring": "Convert coefficient matrix to sparse format. Converts the ``, must be more than 50% for this to provide significant benefits. After calling this method, further fitting with the partial_fit method (if any) will not work until you call densify.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py", + "ast_data": "FunctionDef name:sparsify arg:self arguments arg Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_set_save_slice_info", + "source_code": "def _set_save_slice_info(self, save_slice_info):\n self._save_slice_info = save_slice_info", + "docstring": "Sets the slice info for this . 
Args: save_slice_info: A object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:_set_save_slice_info arg:self arg:save_slice_info arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "get_trtengineop_io_dtypes", + "source_code": "def get_trtengineop_io_dtypes(node, key):\n return _convert_dtype_id_to_str(node.attr[key].list.type)", + "docstring": "Returns the input/output dtypes of a TRTEngineOp.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\utils.py", + "ast_data": "FunctionDef name:get_trtengineop_io_dtypes arg:node arg:key arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "convert_to_list", + "source_code": "def convert_to_list(self, item, separator):\n out = None\n if not isinstance(item, list):\n if 'range' in item:\n out = [item]\n else:\n out = item.split(separator)\n for i in range(len(out)):\n out[i] = out[i].replace(',', '')\n else:\n out = [item]\n return out", + "docstring": "Converts a string into a list with a separator. Args: item: String that needs to be separated into a list by a given separator. List item is also accepted but will take no effect. separator: String with which the will be splited. Returns: List that is a splited version of a given input string. e.g. Input: with separator Output: [1.0, 2.0, 3.0]", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\compat_checker\\compat_checker.py", + "ast_data": "FunctionDef name:convert_to_list arg:self arg:item arg:separator arguments arg arg arg Assign If Call If Compare Assign Assign Call For Call Call Assign Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "evaluate", + "source_code": "def evaluate(self, points):\n points = atleast_2d(asarray(points))\n d, m = points.shape\n if d != self.d:\n if d == 1 and m == self.d:\n points = reshape(points, (self.d, 1))\n m = 1\n else:\n msg = f'points have dimension {d}, dataset has dimension {self.d}'\n raise ValueError(msg)\n output_dtype, spec = _get_output_dtype(self.covariance, points)\n result = gaussian_kernel_estimate[spec](self.dataset.T, self.weights[:, None], points.T, self.cho_cov, output_dtype)\n return result[:, 0]", + "docstring": "Evaluate the estimated pdf on a set of points. Parameters ---------- points : (# of dimensions, # of points)-array Alternatively, a (# of dimensions,) vector can be passed in and treated as a single point. Returns ------- values : (# of points,)-array The values at each point. 
Raises ------ ValueError : if the dimensionality of the input points is different than the dimensionality of the KDE.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_kde.py", + "ast_data": "FunctionDef name:evaluate arg:self arg:points arguments arg arg Assign Call Call Assign If Compare If BoolOp Compare Compare Assign Call Assign Assign Raise Call Assign Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "hermeval", + "source_code": "def hermeval(x, c, tensor=True):\n c = np.array(c, ndmin=1, copy=None)\n if c.dtype.char in '?bBhHiIlLqQpP':\n c = c.astype(np.double)\n if isinstance(x, (tuple, list)):\n x = np.asarray(x)\n if isinstance(x, np.ndarray) and tensor:\n c = c.reshape(c.shape + (1,) * x.ndim)\n if len(c) == 1:\n c0 = c[0]\n c1 = 0\n elif len(c) == 2:\n c0 = c[0]\n c1 = c[1]\n else:\n nd = len(c)\n c0 = c[-2]\n c1 = c[-1]\n for i in range(3, len(c) + 1):\n tmp = c0\n nd = nd - 1\n c0 = c[-i] - c1 * (nd - 1)\n c1 = tmp + c1 * x\n return c0 + c1 * x", + "docstring": "Evaluate an HermiteE series at points x. If is of length `xxccxctensortensortensorxxcccxcxxcc` is multidimensional. The default value is True. Returns ------- values : ndarray, algebra_like The shape of the return value is described above. See Also -------- hermeval2d, hermegrid2d, hermeval3d, hermegrid3d Notes ----- The evaluation uses Clenshaw recursion, aka synthetic division. Examples -------- >>> from numpy.polynomial.hermite_e import hermeval >>> coef = [1,2,3] >>> hermeval(1, coef) 3.0 >>> hermeval([[1,2],[3,4]], coef) array([[ 3., 14.], [31., 54.]])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", + "ast_data": "FunctionDef name:hermeval arg:x arg:c arg:tensor arguments arg arg arg Assign Call If Compare Assign Call If Call Assign Call If BoolOp Call Assign Call If Compare Call Assign Assign If Compare Call Assign Assign Assign Call Assign Assign For Call Call Assign Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n return self._name", + "docstring": "Returns the (non-unique, optional) name of this symbolic Keras value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_accumulate_n_grad", + "source_code": "@ops.RegisterGradient('AccumulateNV2')\ndef _accumulate_n_grad(op, grad):\n return [grad] * len(op.inputs)", + "docstring": "Same as gradient for AddN. 
Copies the gradient to all inputs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:_accumulate_n_grad arg:op arg:grad arguments arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "CurveFilledA", + "source_code": "@_register_style(_style_list, name='<|-')\nclass CurveFilledA(_Curve):\n arrow = '<|-'", + "docstring": "An arrow with filled triangle head at the start.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "ClassDef name:CurveFilledA Assign Call" + }, + { + "library": "authlib", + "name": "InvalidTokenError", + "source_code": "class InvalidTokenError(OAuth2Error):\n error = 'invalid_token'\n description = 'The access token provided is expired, revoked, malformed, or invalid for other reasons.'\n status_code = 401\n\n def __init__(self, description=None, uri=None, status_code=None, state=None, realm=None, **extra_attributes):\n super().__init__(description, uri, status_code, state)\n self.realm = realm\n self.extra_attributes = extra_attributes\n\n def get_headers(self):\n headers = super().get_headers()\n extras = []\n if self.realm:\n extras.append(f'realm=\"{self.realm}\"')\n if self.extra_attributes:\n extras.extend([f'{k}=\"{self.extra_attributes[k]}\"' for k in self.extra_attributes])\n extras.append(f'error=\"{self.error}\"')\n error_description = self.get_error_description()\n extras.append(f'error_description=\"{error_description}\"')\n headers.append(('WWW-Authenticate', 'Bearer ' + ', '.join(extras)))\n return headers", + "docstring": "The access token provided is expired, revoked, malformed, or invalid for other reasons. The resource SHOULD respond with the HTTP 401 (Unauthorized) status code. The client MAY request a new access token and retry the protected resource request.", + "type": "class", + "file_path": "authlib\\authlib\\oauth2\\rfc6750\\errors.py", + "ast_data": "ClassDef name:InvalidTokenError Assign Assign Assign FunctionDef name:__init__ arg:self arg:description arg:uri arg:status_code arg:state arg:realm arguments arg arg arg arg arg arg arg Call Call Assign Assign FunctionDef name:get_headers arg:self arguments arg Assign Call Call Assign If Call If Call Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "authlib", + "name": "check_endpoint_auth_method", + "source_code": "def check_endpoint_auth_method(self, method, endpoint):\n raise NotImplementedError()", + "docstring": "Check if client support the given method for the given endpoint. There is a `RFC7591RFC7591`:", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py", + "ast_data": "FunctionDef name:check_endpoint_auth_method arg:self arg:method arg:endpoint arguments arg arg arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "_color_brew", + "source_code": "def _color_brew(n):\n color_list = []\n s, v = (0.75, 0.9)\n c = s * v\n m = v - c\n for h in np.arange(25, 385, 360.0 / n).astype(int):\n h_bar = h / 60.0\n x = c * (1 - abs(h_bar % 2 - 1))\n rgb = [(c, x, 0), (x, c, 0), (0, c, x), (0, x, c), (x, 0, c), (c, 0, x), (c, x, 0)]\n r, g, b = rgb[int(h_bar)]\n rgb = [int(255 * (r + m)), int(255 * (g + m)), int(255 * (b + m))]\n color_list.append(rgb)\n return color_list", + "docstring": "Generate n colors with equally spaced hues. Parameters ---------- n : int The number of colors required. 
Returns ------- color_list : list, length n List of n tuples of form (R, G, B) being the components of each color.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\tree\\_export.py", + "ast_data": "FunctionDef name:_color_brew arg:n arguments arg Assign Assign Assign Assign For Call Call Assign Assign Call Assign Assign Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_pre_forward_unshard", + "source_code": "@no_type_check\ndef _pre_forward_unshard(state: _FSDPState, handle: Optional[FlatParamHandle]) -> None:\n if not handle:\n return\n if not handle._prefetched:\n _unshard(state, handle, state._unshard_stream, state._pre_unshard_stream)\n handle._needs_pre_forward_unshard = False\n if not torch.distributed._functional_collectives.is_torchdynamo_compiling():\n current_stream = state._device_handle.current_stream()\n if state._unshard_event is not None:\n current_stream.wait_event(state._unshard_event)\n state._unshard_event = None\n else:\n current_stream.wait_stream(state._unshard_stream)\n with torch.profiler.record_function('FullyShardedDataParallel._pre_forward_prefetch'):\n _prefetch_handle(state, handle, _PrefetchMode.FORWARD)", + "docstring": "Unshards parameters in the pre-forward.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_pre_forward_unshard arg:state arg:handle arguments arg arg If Return return:no If Call Assign If Call Assign Call If Compare Call Assign Call With Call Call" + }, + { + "library": "scikit-learn", + "name": "_fit_transform_one", + "source_code": "def _fit_transform_one(transformer, X, y, weight, message_clsname='', message=None, params=None):\n params = params or {}\n with _print_elapsed_time(message_clsname, message):\n if hasattr(transformer, 'fit_transform'):\n res = transformer.fit_transform(X, y, **params.get('fit_transform', {}))\n else:\n res = transformer.fit(X, y, **params.get('fit', {})).transform(X, **params.get('transform', {}))\n if weight is None:\n return (res, transformer)\n return (res * weight, transformer)", + "docstring": "Fits ``.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:_fit_transform_one arg:transformer arg:X arg:y arg:weight arg:message_clsname arg:message arg:params arguments arg arg arg arg arg arg arg Assign BoolOp With Call If Call Assign Call Call Assign Call Call Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "to_empty", + "source_code": "def to_empty(self, *, device: Optional[DeviceLikeType], recurse: bool=True) -> Self:\n return self._apply(lambda t: torch.empty_like(t, device=device), recurse=recurse)", + "docstring": "Move the parameters and buffers to the specified device without copying storage. Args: device (:class:): The desired device of the parameters and buffers in this module. recurse (bool): Whether parameters and buffers of submodules should be recursively moved to the specified device. 
Returns: Module: self", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:to_empty arg:self arguments arg arg arg Return return:yes Call arguments arg Call" + }, + { + "library": "matplotlib", + "name": "add_callback", + "source_code": "def add_callback(self, func):\n return self._callbacks.connect('pchanged', lambda: func(self))", + "docstring": "Add a callback function that will be called whenever one of the 's properties changes. Parameters ---------- func : callable The callback function. It must have the signature:: def func(artist: Artist) -> Any where *artist* is the calling . Return values may exist but are ignored. Returns ------- int The observer id associated with the callback. This id can be used for removing the callback with later. See Also -------- remove_callback", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:add_callback arg:self arg:func arguments arg arg Return return:yes Call arguments Call" + }, + { + "library": "kornia", + "name": "JigsawGenerator", + "source_code": "class JigsawGenerator(RandomGeneratorBase):\n\n def __init__(self, grid: Tuple[int, int]=(4, 4), ensure_perm: bool=True) -> None:\n super().__init__()\n self.grid = grid\n self.ensure_perm = ensure_perm\n\n def __repr__(self) -> str:\n repr = f'grid={self.grid}'\n return repr\n\n def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n self._device = device\n self._dtype = dtype\n\n def forward(self, batch_shape: Tuple[int, ...], same_on_batch: bool=False) -> Dict[str, torch.Tensor]:\n batch_size = batch_shape[0]\n _common_param_check(batch_size, same_on_batch)\n perm_times = self.grid[0] * self.grid[1]\n if batch_size == 0:\n rand_ids = torch.zeros([0, perm_times], device=self._device)\n elif same_on_batch:\n rand_ids = randperm(perm_times, ensure_perm=self.ensure_perm, device=self._device)\n rand_ids = torch.stack([rand_ids] * batch_size)\n else:\n rand_ids = torch.stack([randperm(perm_times, ensure_perm=self.ensure_perm, device=self._device) for _ in range(batch_size)])\n return {'permutation': rand_ids}", + "docstring": "Generate Jigsaw permutation indices for a batch of inputs. Args: grid: the Jigsaw puzzle grid. e.g. (2, 2) means each output will mix image patches in a 2x2 grid. Returns: A dict of parameters to be passed for transformation. - permutation (Tensor): Jigsaw permutation arrangement. Note: The generated random numbers are not reproducible across different devices and dtypes. By default, the parameters will be generated on CPU in float32. 
This can be changed by calling ``.", + "type": "class", + "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\jigsaw.py", + "ast_data": "ClassDef name:JigsawGenerator FunctionDef name:__init__ arg:self arg:grid arg:ensure_perm arguments arg arg arg Call Call Assign Assign FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg Assign Assign FunctionDef name:forward arg:self arg:batch_shape arg:same_on_batch arguments arg arg arg Assign Call Assign If Compare Assign Call If Assign Call Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "seaborn", + "name": "label", + "source_code": "def label(self, formatter: Formatter | None=None) -> Nominal:\n new = copy(self)\n new._label_params = {'formatter': formatter}\n return new", + "docstring": "Configure the selection of labels for the scale's axis or legend. .. note:: This API is under construction and will be enhanced over time. At the moment, it is probably not very useful. Parameters ---------- formatter : :class: subclass Pre-configured matplotlib formatter; other parameters will not be used. Returns ------- scale Copy of self with new tick configuration.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\scales.py", + "ast_data": "FunctionDef name:label arg:self arg:formatter arguments arg arg Assign Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "_supported", + "source_code": "def _supported(self, a, b):\n uncensored = self._uncensored\n uncensored = uncensored[(a < uncensored) & (uncensored < b)]\n left = self._left\n left = left[a < left]\n right = self._right\n right = right[right < b]\n interval = self._interval\n interval = interval[(a < interval[:, 1]) & (interval[:, 0] < b)]\n return CensoredData(uncensored, left=left, right=right, interval=interval)", + "docstring": "Return a subset of self containing the values that are in (or overlap with) the interval (a, b).", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_censored_data.py", + "ast_data": "FunctionDef name:_supported arg:self arg:a arg:b arguments arg arg arg Assign Assign Compare Compare Assign Assign Compare Assign Assign Compare Assign Assign Compare Compare Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "clear", + "source_code": "def clear(self):\n self._pos = -1\n self._elements = []", + "docstring": "Empty the stack.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:clear arg:self arguments arg Assign Assign" + }, + { + "library": "django", + "name": "ContinuousRangeField", + "source_code": "class ContinuousRangeField(RangeField):\n\n def __init__(self, *args, default_bounds=CANONICAL_RANGE_BOUNDS, **kwargs):\n if default_bounds not in ('[)', '(]', '()', '[]'):\n raise ValueError(\"default_bounds must be one of '[)', '(]', '()', or '[]'.\")\n self.default_bounds = default_bounds\n super().__init__(*args, **kwargs)\n\n def get_prep_value(self, value):\n if isinstance(value, (list, tuple)):\n return self.range_type(value[0], value[1], self.default_bounds)\n return super().get_prep_value(value)\n\n def formfield(self, **kwargs):\n kwargs.setdefault('default_bounds', self.default_bounds)\n return super().formfield(**kwargs)\n\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n if self.default_bounds and self.default_bounds != CANONICAL_RANGE_BOUNDS:\n kwargs['default_bounds'] = 
self.default_bounds\n return (name, path, args, kwargs)", + "docstring": "Continuous range field. It allows specifying default bounds for list and tuple inputs.", + "type": "class", + "file_path": "django\\django\\contrib\\postgres\\fields\\ranges.py", + "ast_data": "ClassDef name:ContinuousRangeField FunctionDef name:__init__ arg:self arguments arg arg arg arg If Compare Raise Call Assign Call Call FunctionDef name:get_prep_value arg:self arg:value arguments arg arg If Call Return return:yes Call Return return:yes Call Call FunctionDef name:formfield arg:self arguments arg arg Call Return return:yes Call Call FunctionDef name:deconstruct arg:self arguments arg Assign Call Call If BoolOp Compare Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_is_numeric", + "source_code": "@property\ndef _is_numeric(self) -> bool:\n return False", + "docstring": "Whether columns with this dtype should be considered numeric. By default ExtensionDtypes are assumed to be non-numeric. They'll be excluded from operations that exclude non-numeric columns, like (groupby) reductions, plotting, etc.", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\base.py", + "ast_data": "FunctionDef name:_is_numeric arg:self arguments arg Return return:yes" + }, + { + "library": "seaborn", + "name": "__call__", + "source_code": "def __call__(self, data, var):\n vals = data[var]\n weights = data['weight']\n estimate = np.average(vals, weights=weights)\n if self.error_method == 'ci' and len(data) > 1:\n\n def error_func(x, w):\n return np.average(x, weights=w)\n boots = bootstrap(vals, weights, func=error_func, **self.boot_kws)\n err_min, err_max = _percentile_interval(boots, self.error_level)\n else:\n err_min = err_max = np.nan\n return pd.Series({var: estimate, f'{var}min': err_min, f'{var}max': err_max})", + "docstring": "Aggregate over column of with estimate and error interval.", + "type": "method", + "file_path": "seaborn\\seaborn\\_statistics.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:data arg:var arguments arg arg arg Assign Assign Assign Call If BoolOp Compare Compare Call FunctionDef name:error_func arg:x arg:w arguments arg arg Return return:yes Call Assign Call Assign Call Assign Return return:yes Call" + }, + { + "library": "kornia", + "name": "_infer_batch_shape3d", + "source_code": "def _infer_batch_shape3d(input: Union[Tensor, Tuple[Tensor, Tensor]]) -> torch.Size:\n if isinstance(input, tuple):\n tensor = _transform_input3d(input[0])\n else:\n tensor = _transform_input3d(input)\n return tensor.shape", + "docstring": "Infer input shape. Input may be either (tensor,) or (tensor, transform_matrix)", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py", + "ast_data": "FunctionDef name:_infer_batch_shape3d arg:input arguments arg If Call Assign Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "font_effects", + "source_code": "@property\ndef font_effects(self):\n return self._get_pdftexmap_entry().effects", + "docstring": "The \"font effects\" dict for this glyph. 
This dict contains the values for this glyph of SlantFont and ExtendFont (if any), read off :file:.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\dviread.py", + "ast_data": "FunctionDef name:font_effects arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_collect_coverage", + "source_code": "@staticmethod\ndef _collect_coverage(segments: list[LlvmCoverageSegment]) -> tuple[list[int], list[int]]:\n covered_lines: set[int] = set()\n uncovered_lines: set[int] = set()\n prev_segment = LlvmCoverageSegment(1, 0, 0, 0, 0, None)\n for segment in segments:\n covered_range, uncovered_range = segment.get_coverage(prev_segment)\n covered_lines.update(covered_range)\n uncovered_lines.update(uncovered_range)\n prev_segment = segment\n uncovered_lines.difference_update(covered_lines)\n return (sorted(covered_lines), sorted(uncovered_lines))", + "docstring": "Stateful parsing of coverage segments.", + "type": "method", + "file_path": "pytorch\\tools\\code_coverage\\package\\tool\\parser\\llvm_coverage_parser.py", + "ast_data": "FunctionDef name:_collect_coverage arg:segments arguments arg Call Call Assign Call For Assign Call Call Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_check_model_use_buffer_offset", + "source_code": "def _check_model_use_buffer_offset(model_object):\n if not model_object.metadata:\n return False\n for meta in model_object.metadata:\n if meta.name.decode('utf-8') == 'buffer_location':\n return True\n return False", + "docstring": "Checks if a model object uses buffer offsets to store constant buffers. Args: model_object: tflite model, a python object Returns: True of the model_object has the metadata entry \"buffer_location\" False otherwise", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:_check_model_use_buffer_offset arg:model_object arguments arg If Return return:yes For If Compare Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "remote_parameters", + "source_code": "def remote_parameters(self, recurse: bool=True) -> list[rpc.RRef[Parameter]]:\n return rpc.rpc_sync(self.on, _param_rrefs, args=(self.module_rref, recurse))", + "docstring": "Return a list of :class: pointing to the remote module's parameters. This can typically be used in conjunction with :class:. Args: recurse (bool): if True, then returns parameters of the remote module and all submodules of the remote module. Otherwise, returns only parameters that are direct members of the remote module. Returns: A list of :class: (``) to remote module's parameters.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\nn\\api\\remote_module.py", + "ast_data": "FunctionDef name:remote_parameters arg:self arg:recurse arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "last_executed_query", + "source_code": "def last_executed_query(self, cursor, sql, params):\n\n def to_string(s):\n return force_str(s, strings_only=True, errors='replace')\n if isinstance(params, (list, tuple)):\n u_params = tuple((to_string(val) for val in params))\n elif params is None:\n u_params = ()\n else:\n u_params = {to_string(k): to_string(v) for k, v in params.items()}\n return 'QUERY = %r - PARAMS = %r' % (sql, u_params)", + "docstring": "Return a string of the query last executed by the given cursor, with placeholders replaced with actual values. 
is the raw query containing placeholders and is the sequence of parameters. These are used by default, but this method exists for database backends to provide a better implementation according to their own quoting schemes.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:last_executed_query arg:self arg:cursor arg:sql arg:params arguments arg arg arg arg FunctionDef name:to_string arg:s arguments arg Return return:yes Call If Call Assign Call Call If Compare Assign Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "add_variable", + "source_code": "@doc_controls.do_not_doc_inheritable\ndef add_variable(self, *args, **kwargs):\n warnings.warn('`layer.add_variable` is deprecated and will be removed in a future version. Please use `layer.add_weight` method instead.')\n return self.add_weight(*args, **kwargs)", + "docstring": "Deprecated, do NOT use! Alias for .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:add_variable arg:self arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "dedode_detector_L", + "source_code": "def dedode_detector_L(amp_dtype: torch.dtype=torch.float16) -> DeDoDeDetector:\n NUM_PROTOTYPES = 1\n residual = True\n hidden_blocks = 8\n amp = True\n conv_refiner = nn.ModuleDict({'8': ConvRefiner(512, 512, 256 + NUM_PROTOTYPES, hidden_blocks=hidden_blocks, residual=residual, amp=amp, amp_dtype=amp_dtype), '4': ConvRefiner(256 + 256, 256, 128 + NUM_PROTOTYPES, hidden_blocks=hidden_blocks, residual=residual, amp=amp, amp_dtype=amp_dtype), '2': ConvRefiner(128 + 128, 128, 64 + NUM_PROTOTYPES, hidden_blocks=hidden_blocks, residual=residual, amp=amp, amp_dtype=amp_dtype), '1': ConvRefiner(64 + 64, 64, 1 + NUM_PROTOTYPES, hidden_blocks=hidden_blocks, residual=residual, amp=amp, amp_dtype=amp_dtype)})\n encoder = VGG19(amp=amp, amp_dtype=amp_dtype)\n decoder = Decoder(conv_refiner)\n model = DeDoDeDetector(encoder=encoder, decoder=decoder)\n return model", + "docstring": "Get DeDoDe descriptor of type L.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\dedode\\dedode_models.py", + "ast_data": "FunctionDef name:dedode_detector_L arg:amp_dtype arguments arg Assign Assign Assign Assign Assign Call Call Call Call Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "log", + "source_code": "def log(X, /):\n if np.any(X.support()[0] < 0):\n message = 'The logarithm of a random variable is only implemented when the support is non-negative.'\n raise NotImplementedError(message)\n return MonotonicTransformedDistribution(X, g=np.log, h=np.exp, dh=np.exp, logdh=lambda u: u)", + "docstring": "Natural logarithm of a non-negative random variable Parameters ---------- X : The random variable :math: with positive support. Returns ------- Y : A random variable :math:. Examples -------- Suppose we have a gamma distributed random variable :math:: >>> import numpy as np >>> from scipy import stats >>> Gamma = stats.make_distribution(stats.gamma) >>> X = Gamma(a=1.0) We wish to have a exp-gamma distributed random variable :math:, a random variable whose natural exponential is :math:. If :math: is to be the natural exponential of :math:, then we must take :math: to be the natural logarithm of :math:. 
>>> Y = stats.log(X) To demonstrate that `Xexp(y)`')) >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py", + "ast_data": "FunctionDef name:log arguments arg If Call Compare Call Assign Raise Call Return return:yes Call arguments arg" + }, + { + "library": "tensorflow", + "name": "bessel_i1e", + "source_code": "@tf_export('math.bessel_i1e', 'math.special.bessel_i1e')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef bessel_i1e(x, name=None):\n with ops.name_scope(name, 'bessel_i1e', [x]):\n return gen_special_math_ops.bessel_i1e(x)", + "docstring": "Computes the Bessel i1e function of element-wise. Modified Bessel function of order 1. >>> tf.math.special.bessel_i1e([-1., -0.5, 0.5, 1.]).numpy() array([-0.20791042, -0.15642083, 0.15642083, 0.20791042], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.i1e @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py", + "ast_data": "FunctionDef name:bessel_i1e arg:x arg:name arguments arg arg With Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "to_batched_tensor_list", + "source_code": "def to_batched_tensor_list(element_spec, element):\n return _to_tensor_list_helper(lambda state, spec, component: state + spec._to_batched_tensor_list(component), element_spec, element)", + "docstring": "Returns a tensor list representation of the element. Args: element_spec: A nested structure of objects representing to element type specification. element: The element to convert to tensor list representation. Returns: A tensor list representation of . Raises: ValueError: If and do not have the same number of elements or if the two structures are not nested in the same way or the rank of any of the tensors in the tensor list representation is 0. TypeError: If and differ in the type of sequence in any of their substructures.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\util\\structure.py", + "ast_data": "FunctionDef name:to_batched_tensor_list arg:element_spec arg:element arguments arg arg Return return:yes Call arguments arg arg arg Call" + }, + { + "library": "tensorflow", + "name": "from_dense", + "source_code": "@tf_export('sparse.from_dense')\ndef from_dense(tensor, name=None):\n with ops.name_scope(name, 'dense_to_sparse'):\n tensor = ops.convert_to_tensor(tensor)\n indices = array_ops.where_v2(math_ops.not_equal(tensor, array_ops.zeros_like(tensor)))\n values = array_ops.gather_nd(tensor, indices)\n shape = array_ops.shape(tensor, out_type=dtypes.int64)\n return sparse_tensor.SparseTensor(indices, values, shape)", + "docstring": "Converts a dense tensor into a sparse tensor. Only elements not equal to zero will be present in the result. The resulting has the same dtype and shape as the input. >>> sp = tf.sparse.from_dense([0, 0, 3, 0, 1]) >>> sp.shape.as_list() [5] >>> sp.values.numpy() array([3, 1], dtype=int32) >>> sp.indices.numpy() array([[2], [4]]) Args: tensor: A dense to be converted to a . name: Optional name for the op. 
Returns: The .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", + "ast_data": "FunctionDef name:from_dense arg:tensor arg:name arguments arg arg With Call Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "fast_forward", + "source_code": "def fast_forward(self, n):\n if self.num_generated == 0:\n torch._sobol_engine_ff_(self.quasi, n - 1, self.sobolstate, self.dimension, self.num_generated)\n else:\n torch._sobol_engine_ff_(self.quasi, n, self.sobolstate, self.dimension, self.num_generated - 1)\n self.num_generated += n\n return self", + "docstring": "Function to fast-forward the state of the `nn` samples without using the samples. Args: n (Int): The number of steps to fast-forward by.", + "type": "method", + "file_path": "pytorch\\torch\\quasirandom.py", + "ast_data": "FunctionDef name:fast_forward arg:self arg:n arguments arg arg If Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_all_collection_keys", + "source_code": "def get_all_collection_keys() -> list[str]:\n return get_default_graph().get_all_collection_keys()", + "docstring": "Returns a list of collections used in the default graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:get_all_collection_keys arguments Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_get_oob_predictions", + "source_code": "@staticmethod\ndef _get_oob_predictions(tree, X):\n y_pred = tree.predict_proba(X, check_input=False)\n y_pred = np.asarray(y_pred)\n if y_pred.ndim == 2:\n y_pred = y_pred[..., np.newaxis]\n else:\n y_pred = np.rollaxis(y_pred, axis=0, start=3)\n return y_pred", + "docstring": "Compute the OOB predictions for an individual tree. Parameters ---------- tree : DecisionTreeClassifier object A single decision tree classifier. X : ndarray of shape (n_samples, n_features) The OOB samples. Returns ------- y_pred : ndarray of shape (n_samples, n_classes, n_outputs) The OOB associated predictions.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py", + "ast_data": "FunctionDef name:_get_oob_predictions arg:tree arg:X arguments arg arg Assign Call Assign Call If Compare Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "read_value", + "source_code": "def read_value(self):\n return array_ops.identity(self._variable, name='read')", + "docstring": "Returns the value of this variable, read in the current context. Can be different from value() if it's on another device, with control dependencies, etc. 
Returns: A containing the value of the variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", + "ast_data": "FunctionDef name:read_value arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_AddOpInternal", + "source_code": "def _AddOpInternal(self, op: ops.Operation):\n if not op.inputs:\n self._RemoveExternalControlEdges(op)\n if not any((util.OpInContext(input_op, self) for input_op in op.control_inputs)):\n op._add_control_input(self._pivot.op)\n else:\n for index in range(len(op.inputs)):\n x = op.inputs[index]\n if op.type == 'Merge' and x.op.type == 'NextIteration':\n real_x = x\n else:\n real_x = self.AddValue(x)\n if real_x != x:\n op._update_input(index, real_x)\n self._RemoveExternalControlEdges(op)\n if op.graph._is_function(op.type) or op.type == 'SymbolicGradient':\n op._add_control_input(self._pivot.op)\n output_names = [x.name for x in op.outputs]\n ctxt = self\n while ctxt is not None:\n ctxt._values.update(output_names)\n ctxt = ctxt._outer_context\n if self._outer_context or not util.IsLoopExit(op):\n op.graph.prevent_fetching(op)\n if self._outer_context:\n self._outer_context.AddInnerOp(op)", + "docstring": "Add to the current context.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:_AddOpInternal arg:self arg:op arguments arg arg If Call If Call Call Call For Call Call Assign If BoolOp Compare Compare Assign Assign Call If Compare Call Call If BoolOp Call Compare Call Assign Assign While Compare Call Assign If BoolOp Call Call If Call" + }, + { + "library": "tensorflow", + "name": "local_conv1d", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):\n output_shape = (kernel.shape[0],)\n return local_conv(inputs, kernel, kernel_size, strides, output_shape, data_format)", + "docstring": "Apply 1D conv with un-shared weights. Args: inputs: 3D tensor with shape: (batch_size, steps, input_dim) if data_format is \"channels_last\" or (batch_size, input_dim, steps) if data_format is \"channels_first\". kernel: the unshared weight for convolution, with shape (output_length, feature_dim, filters). kernel_size: a tuple of a single integer, specifying the length of the 1D convolution window. strides: a tuple of a single integer, specifying the stride length of the convolution. data_format: the data format, channels_first or channels_last. Returns: A 3d tensor with shape: (batch_size, output_length, filters) if data_format='channels_first' or 3D tensor with shape: (batch_size, filters, output_length) if data_format='channels_last'.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:local_conv1d arg:inputs arg:kernel arg:kernel_size arg:strides arg:data_format arguments arg arg arg arg arg Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "modules", + "source_code": "def modules(self) -> Iterator['Module']:\n for _, module in self.named_modules():\n yield module", + "docstring": "Return an iterator over all modules in the network. Yields: Module: a module in the network Note: Duplicate modules are returned only once. In the following example, `` will be returned only once. Example:: >>> l = nn.Linear(2, 2) >>> net = nn.Sequential(l, l) >>> for idx, m in enumerate(net.modules()): ... 
print(idx, '->', m) 0 -> Sequential( (0): Linear(in_features=2, out_features=2, bias=True) (1): Linear(in_features=2, out_features=2, bias=True) ) 1 -> Linear(in_features=2, out_features=2, bias=True)", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:modules arg:self arguments arg For Call" + }, + { + "library": "django", + "name": "unescape_string_literal", + "source_code": "@keep_lazy_text\ndef unescape_string_literal(s):\n if not s or s[0] not in '\"\\'' or s[-1] != s[0]:\n raise ValueError('Not a string literal: %r' % s)\n quote = s[0]\n return s[1:-1].replace('\\\\%s' % quote, quote).replace('\\\\\\\\', '\\\\')", + "docstring": "Convert quoted string literals to unquoted strings with escaped quotes and backslashes unquoted:: >>> unescape_string_literal('\"abc\"') 'abc' >>> unescape_string_literal(\"'abc'\") 'abc' >>> unescape_string_literal('\"a \\\"bc\\\"\"') 'a \"bc\"' >>> unescape_string_literal(\"'\\'ab\\' c'\") \"'ab' c\"", + "type": "function", + "file_path": "django\\django\\utils\\text.py", + "ast_data": "FunctionDef name:unescape_string_literal arg:s arguments arg If BoolOp Compare Compare Raise Call Assign Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "sample_points_2d", + "source_code": "def sample_points_2d(self, heights: Tensor, widths: Tensor, num_img_rays: Tensor) -> Dict[int, RaySampler.Points2D]:\n num_img_rays = num_img_rays.int()\n points2d_as_flat_tensors: Dict[int, RaySampler.Points2D_FlatTensors] = {}\n for camera_id, (height, width, n) in enumerate(zip(heights.tolist(), widths.tolist(), num_img_rays.tolist())):\n n_sqrt = int(math.sqrt(n))\n y_rand = torch.randperm(int(height), device=self._device, dtype=self._dtype)[:min(int(height), n_sqrt)]\n x_rand = torch.randperm(int(width), device=self._device, dtype=self._dtype)[:min(int(width), n_sqrt)]\n y_grid, x_grid = torch_meshgrid([y_rand, x_rand], indexing='ij')\n RaySampler._add_points2d_as_flat_tensors_to_num_ray_dict(n_sqrt * n_sqrt, x_grid, y_grid, camera_id, points2d_as_flat_tensors)\n return RaySampler._build_num_ray_dict_of_points2d(points2d_as_flat_tensors)", + "docstring": "Randomly sample pixel points in 2d over a regular row-column grid. Args: heights: tensor that holds scene camera image heights (can vary between cameras): math: . widths: tensor that holds scene camera image widths (can vary between cameras): math: . num_img_rays: tensor that holds the number of rays to randomly cast from each scene camera. Number of rows and columns is the square root of this value: int math: . 
Returns: dictionary of Points2D objects that holds information on pixel 2d coordinates of each ray and the camera id it was casted by: Dict[int, Points2D]", + "type": "method", + "file_path": "kornia\\kornia\\nerf\\samplers.py", + "ast_data": "FunctionDef name:sample_points_2d arg:self arg:heights arg:widths arg:num_img_rays arguments arg arg arg arg Assign Call For Call Call Call Call Call Assign Call Call Assign Call Call Call Call Assign Call Call Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "get_dataframe_repr_params", + "source_code": "def get_dataframe_repr_params() -> dict[str, Any]:\n from pandas.io.formats import console\n if get_option('display.expand_frame_repr'):\n line_width, _ = console.get_console_size()\n else:\n line_width = None\n return {'max_rows': get_option('display.max_rows'), 'min_rows': get_option('display.min_rows'), 'max_cols': get_option('display.max_columns'), 'max_colwidth': get_option('display.max_colwidth'), 'show_dimensions': get_option('display.show_dimensions'), 'line_width': line_width}", + "docstring": "Get the parameters used to repr(dataFrame) calls using DataFrame.to_string. Supplying these parameters to DataFrame.to_string is equivalent to calling ``. This is useful if you want to adjust the repr output. .. versionadded:: 1.4.0 Example ------- >>> import pandas as pd >>> >>> df = pd.DataFrame([[1, 2], [3, 4]]) >>> repr_params = pd.io.formats.format.get_dataframe_repr_params() >>> repr(df) == df.to_string(**repr_params) True", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\format.py", + "ast_data": "FunctionDef name:get_dataframe_repr_params arguments If Call Assign Call Assign Return return:yes Call Call Call Call Call" + }, + { + "library": "pandas", + "name": "infer_axes", + "source_code": "def infer_axes(self) -> bool:\n s = self.storable\n if s is None:\n return False\n self.get_attrs()\n return True", + "docstring": "infer the axes of my storer return a boolean indicating if we have a valid storer or not", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:infer_axes arg:self arguments arg Assign If Compare Return return:yes Call Return return:yes" + }, + { + "library": "scipy", + "name": "__init__", + "source_code": "def __init__(self, *system, **kwargs):\n if isinstance(system[0], LinearTimeInvariant):\n return\n super().__init__(**kwargs)\n self._A = None\n self._B = None\n self._C = None\n self._D = None\n self.A, self.B, self.C, self.D = abcd_normalize(*system)", + "docstring": "Initialize the state space lti/dlti system.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg If Call Return return:no Call Call Assign Assign Assign Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "get_checkpoint_factories_and_keys", + "source_code": "def get_checkpoint_factories_and_keys(object_names, object_map=None):\n checkpoint_factory_map = object_identity.ObjectIdentityDictionary()\n unmapped_registered_savers = collections.defaultdict(dict)\n for trackable, object_name in object_names.items():\n object_to_save = util.get_mapped_trackable(trackable, object_map)\n saver_name = registration.get_registered_saver_name(object_to_save)\n if saver_name:\n unmapped_registered_savers[saver_name][object_name] = trackable\n else:\n checkpoint_factory_map[trackable] = []\n for name, saveable_factory in 
saveable_object_util.saveable_objects_from_trackable(object_to_save).items():\n key_suffix = saveable_compat.get_saveable_name(object_to_save) or name\n checkpoint_key = trackable_utils.checkpoint_key(object_name, key_suffix)\n if not saveable_compat.force_checkpoint_conversion_enabled():\n name = key_suffix\n checkpoint_factory_map[trackable].append(_CheckpointFactoryData(factory=saveable_factory, name=name, checkpoint_key=checkpoint_key))\n return (checkpoint_factory_map, unmapped_registered_savers)", + "docstring": "Gets a map of saveable factories and corresponding checkpoint keys. Args: object_names: a dictionary that maps objects to auto-generated string names. object_map: a dictionary mapping to copied objects. The copied objects are generated from which copies the object into another graph. Generally only resource objects (e.g. Variables, Tables) will be in this map. Returns: A tuple of ( Dictionary mapping trackable -> list of _CheckpointFactoryData, Dictionary mapping registered saver name -> {object name -> trackable})", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util_v1.py", + "ast_data": "FunctionDef name:get_checkpoint_factories_and_keys arg:object_names arg:object_map arguments arg arg Assign Call Assign Call For Call Assign Call Assign Call If Assign Assign For Call Call Assign BoolOp Call Assign Call If Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_finalize_func", + "source_code": "@def_function.function(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])\ndef _finalize_func(string_handle):\n iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(string_handle, **self._input_dataset._flat_structure)\n with ops.control_dependencies([resource_variable_ops.destroy_resource_op(iterator_resource, ignore_lookup_error=True)]):\n return array_ops.constant(0, dtypes.int64)", + "docstring": "Destroys the iterator resource created. 
Args: string_handle: An iterator string handle created by _init_func Returns: Tensor constant 0", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\prefetching_ops.py", + "ast_data": "FunctionDef name:_finalize_func arg:string_handle arguments arg Assign Call With Call Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "GetLoopConstantEnter", + "source_code": "def GetLoopConstantEnter(value):\n id_ops = {'Switch', 'RefSwitch', 'Identity', 'RefIdentity'}\n op = value.op\n while op.type in id_ops:\n op = op.inputs[0].op\n return op if IsLoopConstantEnter(op) else None", + "docstring": "Return the enter op if we can infer to be a loop invariant.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py", + "ast_data": "FunctionDef name:GetLoopConstantEnter arg:value arguments arg Assign Assign While Compare Assign Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "values", + "source_code": "def values(self, key):\n return [e.value for e in self.elements(key)]", + "docstring": "Return a sorted list of HeaderElement.value for the given header.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", + "ast_data": "FunctionDef name:values arg:self arg:key arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "_create_project_state", + "source_code": "def _create_project_state(self, with_applied_migrations=False):\n state = ProjectState(real_apps=self.loader.unmigrated_apps)\n if with_applied_migrations:\n full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)\n applied_migrations = {self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes}\n for migration, _ in full_plan:\n if migration in applied_migrations:\n migration.mutate_state(state, preserve=False)\n return state", + "docstring": "Create a project state including all the applications without migrations and applied migrations if with_applied_migrations=True.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\executor.py", + "ast_data": "FunctionDef name:_create_project_state arg:self arg:with_applied_migrations arguments arg arg Assign Call If Assign Call Call Assign Compare For If Compare Call Return return:yes" + }, + { + "library": "scipy", + "name": "Ackley01", + "source_code": "class Ackley01(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-35.0] * self.N, [35.0] * self.N))\n self.global_optimum = [[0 for _ in range(self.N)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n u = sum(x ** 2)\n v = sum(cos(2 * pi * x))\n return -20.0 * exp(-0.2 * sqrt(u / self.N)) - exp(v / self.N) + 20.0 + exp(1.0)", + "docstring": "Ackley01 objective function. The Ackley01 [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Ackley01}}(x) = -20 e^{-0.2 \\sqrt{\\frac{1}{n} \\sum_{i=1}^n x_i^2}} - e^{\\frac{1}{n} \\sum_{i=1}^n \\cos(2 \\pi x_i)} + 20 + e Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Adorio, E. 
MVF - \"Multivariate Test Functions Library in C for Unconstrained Global Optimization\", 2005 TODO: the -0.2 factor in the exponent of the first term is given as -0.02 in Jamil et al.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_A.py", + "ast_data": "ClassDef name:Ackley01 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Return return:yes Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "get_visible", + "source_code": "def get_visible(self):\n return self._visible", + "docstring": "Get the visibility of the selector artists.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:get_visible arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "SubConfigProxy", + "source_code": "class SubConfigProxy:\n\n def __init__(self, config: object, prefix: str):\n super().__setattr__('_config', config)\n super().__setattr__('_prefix', prefix)\n\n def __setattr__(self, name: str, value: object) -> None:\n return self._config.__setattr__(self._prefix + name, value)\n\n def __getattr__(self, name: str) -> Any:\n return self._config.__getattr__(self._prefix + name)\n\n def __delattr__(self, name: str) -> None:\n return self._config.__delattr__(self._prefix + name)", + "docstring": "Shim to redirect to main config. maps to _config[\"triton.cudagraphs\"]", + "type": "class", + "file_path": "pytorch\\torch\\utils\\_config_module.py", + "ast_data": "ClassDef name:SubConfigProxy FunctionDef name:__init__ arg:self arg:config arg:prefix arguments arg arg arg Call Call Call Call FunctionDef name:__setattr__ arg:self arg:name arg:value arguments arg arg arg Return return:yes Call FunctionDef name:__getattr__ arg:self arg:name arguments arg arg Return return:yes Call FunctionDef name:__delattr__ arg:self arg:name arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "is_initialized", + "source_code": "def is_initialized():\n return _initialized and (not _is_in_bad_fork())", + "docstring": "Return whether PyTorch's MTIA state has been initialized.", + "type": "function", + "file_path": "pytorch\\torch\\mtia\\__init__.py", + "ast_data": "FunctionDef name:is_initialized arguments Return return:yes BoolOp Call" + }, + { + "library": "kornia", + "name": "inverse", + "source_code": "def inverse(self) -> Se3:\n r_inv = self.r.inverse()\n _t = -1 * self.t\n if isinstance(_t, int):\n raise TypeError('Unexpected integer from `-1 * translation`')\n return Se3(r_inv, r_inv * _t)", + "docstring": "Return the inverse transformation. Example: >>> s = Se3(So3.identity(), torch.ones(3)) >>> s_inv = s.inverse() >>> s_inv.r Parameter containing: tensor([1., -0., -0., -0.], requires_grad=True) >>> s_inv.t Parameter containing: tensor([-1., -1., -1.], requires_grad=True)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py", + "ast_data": "FunctionDef name:inverse arg:self arguments arg Assign Call Assign If Call Raise Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "nipy_spectral", + "source_code": "def nipy_spectral() -> None:\n set_cmap('nipy_spectral')", + "docstring": "Set the colormap to 'nipy_spectral'. This changes the default colormap as well as the colormap of the current image if there is one. 
See `` for more information.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:nipy_spectral arguments Call" + }, + { + "library": "pytorch", + "name": "LazyLinear", + "source_code": "class LazyLinear(LazyModuleMixin, Linear):\n cls_to_become = Linear\n weight: UninitializedParameter\n bias: UninitializedParameter\n\n def __init__(self, out_features: int, bias: bool=True, device=None, dtype=None) -> None:\n factory_kwargs = {'device': device, 'dtype': dtype}\n super().__init__(0, 0, False)\n self.weight = UninitializedParameter(**factory_kwargs)\n self.out_features = out_features\n if bias:\n self.bias = UninitializedParameter(**factory_kwargs)\n\n def reset_parameters(self) -> None:\n if not self.has_uninitialized_params() and self.in_features != 0:\n super().reset_parameters()\n\n def initialize_parameters(self, input) -> None:\n if self.has_uninitialized_params():\n with torch.no_grad():\n self.in_features = input.shape[-1]\n self.weight.materialize((self.out_features, self.in_features))\n if self.bias is not None:\n self.bias.materialize((self.out_features,))\n self.reset_parameters()\n if self.in_features == 0:\n assert input.shape[-1] == self.weight.shape[-1], f'The in_features inferred from input: {input.shape[-1]} is not equal to in_features from self.weight: {self.weight.shape[-1]}'\n self.in_features = input.shape[-1]", + "docstring": "A :class: module where is inferred. In this module, the and are of :class: class. They will be initialized after the first call to `torch.nn.LinearLineartorch.nn.modules.lazy.LazyModuleMixin(\\text{out\\_features}, \\text{in\\_features})\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})k = \\frac{1}{\\text{in\\_features}}(\\text{out\\_features})bias\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})k = \\frac{1}{\\text{in\\_features}}`", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\linear.py", + "ast_data": "ClassDef name:LazyLinear Assign FunctionDef name:__init__ arg:self arg:out_features arg:bias arg:device arg:dtype arguments arg arg arg arg arg Assign Call Call Assign Call Assign If Assign Call FunctionDef name:reset_parameters arg:self arguments arg If BoolOp Call Compare Call Call FunctionDef name:initialize_parameters arg:self arg:input arguments arg arg If Call With Call Assign Call If Compare Call Call If Compare Compare Assign" + }, + { + "library": "tensorflow", + "name": "currentframe", + "source_code": "def currentframe():\n return _inspect.stack()[1][0]", + "docstring": "TFDecorator-aware replacement for inspect.currentframe.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", + "ast_data": "FunctionDef name:currentframe arguments Return return:yes Call" + }, + { + "library": "pytorch", + "name": "load_state_dict", + "source_code": "def load_state_dict(self, state_dict: dict[str, Any]):\n self.__dict__.update(state_dict)", + "docstring": "Load the scheduler's state. Args: state_dict (dict): scheduler state. 
Should be an object returned from a call to :meth:.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Call" + }, + { + "library": "pytorch", + "name": "AHFeature", + "source_code": "class AHFeature:\n\n def __init__(self, name: str, value: Value, is_categorical: bool=False) -> None:\n self.name = name\n self.value = value\n self.is_categorical = is_categorical", + "docstring": "The context, that AutoHeuristic stores, is a list of features. AutoHeuristic needs to know whether a feature is categorical (i.e., not a continuous variable) to learn a machine learning model.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\autoheuristic_utils.py", + "ast_data": "ClassDef name:AHFeature FunctionDef name:__init__ arg:self arg:name arg:value arg:is_categorical arguments arg arg arg arg Assign Assign Assign" + }, + { + "library": "authlib", + "name": "add_params_to_qs", + "source_code": "def add_params_to_qs(query, params):\n if isinstance(params, dict):\n params = params.items()\n qs = urlparse.parse_qsl(query, keep_blank_values=True)\n qs.extend(params)\n return url_encode(qs)", + "docstring": "Extend a query with a list of two-tuples.", + "type": "function", + "file_path": "authlib\\authlib\\common\\urls.py", + "ast_data": "FunctionDef name:add_params_to_qs arg:query arg:params arguments arg arg If Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_wrap_and_check_metrics", + "source_code": "def _wrap_and_check_metrics(self, metrics):\n if not isinstance(metrics, dict):\n metrics = {self.METRICS_NAME: metrics}\n outputs = {}\n for key, value in metrics.items():\n if isinstance(value, tuple):\n metric_val, metric_op = value\n else:\n metric_val = value.result()\n assert len(value.updates) == 1\n metric_op = value.updates[0]\n key = self._check_output_key(key, self.METRICS_NAME)\n key = self._prefix_key(key, self.METRICS_NAME)\n val_name = key + self._SEPARATOR_CHAR + self.METRIC_VALUE_SUFFIX\n op_name = key + self._SEPARATOR_CHAR + self.METRIC_UPDATE_SUFFIX\n if not isinstance(metric_val, tensor.Tensor):\n raise ValueError('{} output value must be a Tensor; got {}.'.format(key, metric_val))\n if not (tensor_util.is_tensor(metric_op) or isinstance(metric_op, ops.Operation)):\n raise ValueError('{} update_op must be a Tensor or Operation; got {}.'.format(key, metric_op))\n metric_op_tensor = metric_op\n if not isinstance(metric_op, tensor.Tensor):\n with ops.control_dependencies([metric_op]):\n metric_op_tensor = constant_op.constant([], name='metric_op_wrapper')\n outputs[val_name] = metric_val\n outputs[op_name] = metric_op_tensor\n return outputs", + "docstring": "Handle the saving of metrics. Metrics is either a tuple of (value, update_op), or a dict of such tuples. Here, we separate out the tuples and create a dict with names to tensors. Args: metrics: Dict of metric results keyed by name. The values of the dict can be one of the following: (1) instance of class. (2) (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op. 
Returns: dict of output_names to tensors Raises: ValueError: if the dict key is not a string, or the metric values or ops are not tensors.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py", + "ast_data": "FunctionDef name:_wrap_and_check_metrics arg:self arg:metrics arguments arg arg If Call Assign Assign For Call If Call Assign Assign Call Compare Call Assign Assign Call Assign Call Assign Assign If Call Raise Call Call If BoolOp Call Call Raise Call Call Assign If Call With Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_make_random_matrix", + "source_code": "def _make_random_matrix(self, n_components, n_features):\n random_state = check_random_state(self.random_state)\n self.density_ = _check_density(self.density, n_features)\n return _sparse_random_matrix(n_components, n_features, density=self.density_, random_state=random_state)", + "docstring": "Generate the random projection matrix Parameters ---------- n_components : int Dimensionality of the target projection space. n_features : int Dimensionality of the original source space. Returns ------- components : sparse matrix of shape (n_components, n_features) The generated random matrix in CSR format.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\random_projection.py", + "ast_data": "FunctionDef name:_make_random_matrix arg:self arg:n_components arg:n_features arguments arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_safe_initial_value_from_tensor", + "source_code": "def _safe_initial_value_from_tensor(name, tensor, op_cache):\n op = tensor.op\n new_op = op_cache.get(op.name)\n if new_op is None:\n new_op = _safe_initial_value_from_op(name, op, op_cache)\n op_cache[op.name] = new_op\n return new_op.outputs[tensor.value_index]", + "docstring": "Replace dependencies on variables with their initialized values. Args: name: Variable name. tensor: A . The tensor to replace. op_cache: A dict mapping operation names to s. Used to memoize the results so as to avoid creating redundant operations. Returns: A compatible with . Any inputs that lead to variable values will be replaced with a corresponding graph that uses the variable's initialized values. This is done on a best-effort basis. If no modifications need to be made then will be returned unchanged.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:_safe_initial_value_from_tensor arg:name arg:tensor arg:op_cache arguments arg arg arg Assign Assign Call If Compare Assign Call Assign Return return:yes" + }, + { + "library": "kornia", + "name": "BoundingBox", + "source_code": "@dataclass(frozen=True)\nclass BoundingBox:\n data: tuple[float, float, float, float]\n data_format: BoundingBoxDataFormat", + "docstring": "Bounding box data class. Useful for representing bounding boxes in different formats for object detection. Args: data: tuple of bounding box data. The length of the tuple depends on the data format. 
data_format: bounding box data format.", + "type": "class", + "file_path": "kornia\\kornia\\models\\detection\\base.py", + "ast_data": "ClassDef name:BoundingBox Call" + }, + { + "library": "django", + "name": "allowed_origin_subdomains", + "source_code": "@cached_property\ndef allowed_origin_subdomains(self):\n allowed_origin_subdomains = defaultdict(list)\n for parsed in (urlsplit(origin) for origin in settings.CSRF_TRUSTED_ORIGINS if '*' in origin):\n allowed_origin_subdomains[parsed.scheme].append(parsed.netloc.lstrip('*'))\n return allowed_origin_subdomains", + "docstring": "A mapping of allowed schemes to list of allowed netlocs, where all subdomains of the netloc are allowed.", + "type": "method", + "file_path": "django\\django\\middleware\\csrf.py", + "ast_data": "FunctionDef name:allowed_origin_subdomains arg:self arguments arg Assign Call For Call Compare Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "set_extra_state", + "source_code": "def set_extra_state(self, state: Any) -> None:\n raise RuntimeError('Reached a code path in Module.set_extra_state() that should never be called. Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml to report this bug.')", + "docstring": "Set extra state contained in the loaded . This function is called from :func: to handle any extra state found within the . Implement this function and a corresponding :func: for your module if you need to store extra state within its . Args: state (dict): Extra state from the", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:set_extra_state arg:self arg:state arguments arg arg Raise Call" + }, + { + "library": "matplotlib", + "name": "set_mutation_aspect", + "source_code": "def set_mutation_aspect(self, aspect):\n self._mutation_aspect = aspect\n self.stale = True", + "docstring": "Set the aspect ratio of the bbox mutation. Parameters ---------- aspect : float", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:set_mutation_aspect arg:self arg:aspect arguments arg arg Assign Assign" + }, + { + "library": "pytorch", + "name": "NodeState", + "source_code": "class NodeState(str, Enum):\n INIT = 'INIT'\n RUNNING = 'RUNNING'\n SUCCEEDED = 'SUCCEEDED'\n FAILED = 'FAILED'", + "docstring": "The states that a node can be in rendezvous.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\elastic\\events\\api.py", + "ast_data": "ClassDef name:NodeState Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "fallback", + "source_code": "def fallback(self, fn, dispatch_key='', *, with_keyset=False):\n if torch._running_with_deploy():\n _library.utils.warn_deploy()\n return\n if dispatch_key == '':\n dispatch_key = self.dispatch_key\n if self.ns != '_':\n raise RuntimeError(f'Fallback can only be registered using libary fragment on the global namespace \"_\" but it is {self.ns}')\n assert dispatch_key != ''\n assert self.m is not None\n self.m.fallback(dispatch_key, fn, with_keyset)", + "docstring": "Registers the function implementation as the fallback for the given key. This function only works for a library with global namespace (\"_\"). Args: fn: function used as fallback for the given dispatch key or :func: to register a fallthrough. dispatch_key: dispatch key that the input function should be registered for. By default, it uses the dispatch key that the library was created with. 
with_keyset: flag controlling if the current dispatcher call keyset should be passed as the first argument to :attr: when calling. This should be used to create the appropriate keyset for redispatch calls. Example:: >>> my_lib = Library(\"_\", \"IMPL\") >>> def fallback_kernel(op, *args, **kwargs): >>> # Handle all autocast ops generically >>> # ... >>> my_lib.fallback(fallback_kernel, \"Autocast\")", + "type": "method", + "file_path": "pytorch\\torch\\library.py", + "ast_data": "FunctionDef name:fallback arg:self arg:fn arg:dispatch_key arguments arg arg arg arg If Call Call Return return:no If Compare Assign If Compare Raise Call Compare Compare Call" + }, + { + "library": "tensorflow", + "name": "_create_variables_and_slots", + "source_code": "def _create_variables_and_slots(self) -> Dict[str, Dict[str, tf_variables.Variable]]:\n variables = {}\n for stacked_table_name, tables in self._stacked_table_to_tables.items():\n variables[stacked_table_name] = self._create_variables(tables, stacked_table_name=stacked_table_name)\n return variables", + "docstring": "Create variables for TPU embeddings. Returns: A dict of dicts. The outer dict is keyed by the table names and the inner dicts are keyed by 'parameters' and the slot variable names.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", + "ast_data": "FunctionDef name:_create_variables_and_slots arg:self arguments arg Assign For Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "reserve", + "source_code": "def reserve(self, n: int) -> Optional[str]:\n with self._lock:\n for lower_index in range(self._num_cores - n + 1):\n indices = tuple(range(lower_index, lower_index + n))\n if all((self._available[i] for i in indices)):\n for i in indices:\n self._available[i] = False\n lower_core = indices[0] + self._min_core_id\n upper_core = indices[-1] + self._min_core_id\n key = f'{lower_core}-{upper_core}' if n > 1 else f'{lower_core}'\n self._reservations[key] = indices\n return key\n return None", + "docstring": "Simple first-fit policy. If successful, return a string for . 
Otherwise, return None.", + "type": "method", + "file_path": "pytorch\\benchmarks\\instruction_counts\\execution\\runner.py", + "ast_data": "FunctionDef name:reserve arg:self arg:n arguments arg arg With For Call Assign Call Call If Call For Assign Assign Assign Assign Compare Assign Return return:yes Return return:no" + }, + { + "library": "pytorch", + "name": "sym_size", + "source_code": "def sym_size(self, sym):\n if symbol_is_type(sym, SymT.TMP):\n return self.lookup_cse_var(sym.name).indirect_indexing_size\n return self.halide_vars[sym]", + "docstring": "The size of an index symbol", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\halide.py", + "ast_data": "FunctionDef name:sym_size arg:self arg:sym arguments arg arg If Call Return return:yes Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "error", + "source_code": "def error(self, msg='', context='', severity=logging.INFO, traceback=False):\n exc_info = None\n if traceback:\n exc_info = _cperror._exc_info()\n self.error_log.log(severity, ' '.join((self.time(), context, msg)), exc_info=exc_info)", + "docstring": "Write the given ``.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cplogging.py", + "ast_data": "FunctionDef name:error arg:self arg:msg arg:context arg:severity arg:traceback arguments arg arg arg arg arg Assign If Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "with_attributes", + "source_code": "@staticmethod\ndef with_attributes(name, checkpointable_objects=None, functions=None, copy_from=None):\n checkpointable_objects = checkpointable_objects or []\n functions = functions or []\n if copy_from is not None:\n for cls in copy_from:\n checkpointable_objects.extend(cls.all_checkpointable_objects)\n functions.extend(cls.all_functions)\n classdict = {'all_checkpointable_objects': set(checkpointable_objects), 'all_functions': set(functions)}\n return type(name, (SerializedAttributes,), classdict)", + "docstring": "Creates a subclass with all attributes as specified in the arguments. Args: name: Name of subclass checkpointable_objects: List of checkpointable objects to be serialized in the SavedModel. functions: List of functions to be serialized in the SavedModel. copy_from: List of other SerializedAttributes subclasses. The returned class will copy checkpoint objects/functions from each subclass. Returns: Child class with attributes as defined in the and lists.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py", + "ast_data": "FunctionDef name:with_attributes arg:name arg:checkpointable_objects arg:functions arg:copy_from arguments arg arg arg arg Assign BoolOp Assign BoolOp If Compare For Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_verify_managed_params", + "source_code": "def _verify_managed_params(module: nn.Module, params: list[nn.Parameter]) -> None:\n for param in params:\n if len(param.shape) == 0:\n param_name = ''\n for name, param_ in module.named_parameters():\n if param is param_:\n param_name = name\n break\n assert param_name\n raise ValueError(f\"FSDP doesn't support scalar parameters. Change {param_name} to a 1D tensor with numel equal to 1.\")", + "docstring": "Verify if the parameters are accepted by FSDP. 
The only restriction now is that the parameter cannot be a scalar tensor (param.shape == []).", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py", + "ast_data": "FunctionDef name:_verify_managed_params arg:module arg:params arguments arg arg For If Compare Call Assign For Call If Compare Assign Raise Call" + }, + { + "library": "django", + "name": "wrap_database_errors", + "source_code": "@cached_property\ndef wrap_database_errors(self):\n return DatabaseErrorWrapper(self)", + "docstring": "Context manager and decorator that re-throws backend-specific database exceptions using Django's common wrappers.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\base.py", + "ast_data": "FunctionDef name:wrap_database_errors arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "handle_dynamo_export_graph", + "source_code": "def handle_dynamo_export_graph(gm: GraphModule, inputs: Sequence[InputType], compile_gm: Callable[..., Any]) -> Callable[..., Any]:\n codegen = gm.graph._codegen\n gm.graph._codegen = torch.fx.graph.CodeGen()\n gm.recompile()\n compiled_fn = compile_gm(gm, codegen.process_inputs(*inputs))\n\n @functools.wraps(compiled_fn)\n def wrapper(*args: Any) -> Any:\n return codegen.process_outputs(compiled_fn(*codegen.process_inputs(*args)))\n return wrapper", + "docstring": "embeds pytrees in the FX graph codegen object, convert that to a normal FX graph so inductor can compile it.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\compile_fx.py", + "ast_data": "FunctionDef name:handle_dynamo_export_graph arg:gm arg:inputs arg:compile_gm arguments arg arg arg Assign Assign Call Call Assign Call Call FunctionDef name:wrapper arguments arg Return return:yes Call Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "maybe_iterable_to_list", + "source_code": "def maybe_iterable_to_list(obj: Iterable[T] | T) -> Collection[T] | T:\n if isinstance(obj, abc.Iterable) and (not isinstance(obj, abc.Sized)):\n return list(obj)\n obj = cast(Collection, obj)\n return obj", + "docstring": "If obj is Iterable but not list-like, consume into list.", + "type": "function", + "file_path": "pandas\\pandas\\core\\common.py", + "ast_data": "FunctionDef name:maybe_iterable_to_list arg:obj arguments arg If BoolOp Call Call Return return:yes Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "session_creator", + "source_code": "def session_creator(self, scaffold=None, config=None, checkpoint_dir=None, checkpoint_filename_with_path=None, max_wait_secs=7200):\n if config:\n session_config = copy.deepcopy(config)\n session_config.MergeFrom(self._session_config)\n else:\n session_config = self._session_config\n if not self._strategy or self._strategy.extended.experimental_should_init:\n logging.info('Creating chief session creator with config: %r', config)\n return monitored_session.ChiefSessionCreator(scaffold, master=self.master_target, config=session_config, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path)\n else:\n logging.info('Creating worker session creator with config: %r', config)\n return monitored_session.WorkerSessionCreator(scaffold, master=self.master_target, config=session_config, max_wait_secs=max_wait_secs)", + "docstring": "Returns a session creator. The returned session creator will be configured with the correct master target and session configs. 
It will also run either init ops or ready ops by querying the object when is called on it. Args: scaffold: A used for gathering or building supportive ops. If not specified a default one is created. It's used to finalize the graph. config: proto used to configure the session. checkpoint_dir: A string. Optional path to a directory where to restore variables. checkpoint_filename_with_path: Full file name path to the checkpoint file. Only one of or can be specified. max_wait_secs: Maximum time to wait for the session to become available. Returns: a descendant of SessionCreator.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py", + "ast_data": "FunctionDef name:session_creator arg:self arg:scaffold arg:config arg:checkpoint_dir arg:checkpoint_filename_with_path arg:max_wait_secs arguments arg arg arg arg arg arg If Assign Call Call Assign If BoolOp Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "from_library", + "source_code": "def from_library(lib):\n if not lib.function and (not lib.gradient):\n return []\n funcs = {fdef.signature.name: fdef for fdef in lib.function}\n for g in lib.gradient:\n if g.function_name not in funcs:\n raise ValueError(f\"FunctionDefLibrary missing '{g.function_name}' FunctionDef\\n{lib}\")\n if g.gradient_func not in funcs:\n raise ValueError(f\"FunctionDefLibrary missing '{g.gradient_func}' FunctionDef\\n{lib}\")\n func_to_grad = collections.defaultdict(lambda: None)\n grad_to_funcs = collections.defaultdict(list)\n for gdef in lib.gradient:\n func_to_grad[gdef.function_name] = gdef.gradient_func\n grad_to_funcs[gdef.gradient_func].append(gdef.function_name)\n ready = [fdef for fdef in lib.function if func_to_grad[fdef.signature.name] is None]\n if not ready:\n raise ValueError(f'FunctionDefLibrary contains cyclic gradient functions!\\n{lib}')\n initialized = {}\n while ready:\n fdef = ready.pop()\n name = fdef.signature.name\n grad = initialized.get(func_to_grad[name])\n if func_to_grad[name]:\n assert grad\n defined_func = _from_definition(fdef, grad_func=grad)\n initialized[name] = defined_func\n ready.extend((funcs[f] for f in grad_to_funcs[name]))\n return initialized.values()", + "docstring": "Creates _DefinedFunctions initialized from a FunctionDefLibrary proto. This method handles assigning the correct gradient functions to each function. 
Args: lib: a FunctionDefLibrary Returns: A list of _DefinedFunctions Raises: ValueError: is invalid", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py", + "ast_data": "FunctionDef name:from_library arg:lib arguments arg If BoolOp Return return:no Assign For If Compare Raise Call If Compare Raise Call Assign Call arguments Assign Call For Assign Call Assign Compare If Raise Call Assign While Assign Call Assign Assign Call If Assign Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "init_connection_state", + "source_code": "def init_connection_state(self):\n if self.alias not in RAN_DB_VERSION_CHECK:\n self.check_database_version_supported()\n RAN_DB_VERSION_CHECK.add(self.alias)", + "docstring": "Initialize the database connection settings.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\base.py", + "ast_data": "FunctionDef name:init_connection_state arg:self arguments arg If Compare Call Call" + }, + { + "library": "django", + "name": "i18n_javascript", + "source_code": "def i18n_javascript(self, request, extra_context=None):\n return JavaScriptCatalog.as_view(packages=['django.contrib.admin'])(request)", + "docstring": "Display the i18n JavaScript that the Django admin requires. is unused but present for consistency with the other admin views.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\sites.py", + "ast_data": "FunctionDef name:i18n_javascript arg:self arg:request arg:extra_context arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "get_metadata_routing", + "source_code": "def get_metadata_routing(self):\n router = MetadataRouter(owner=self.__class__.__name__).add(estimator=self._get_estimator(), method_mapping=MethodMapping().add(caller='fit', callee='fit'))\n return router", + "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.3 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\multioutput.py", + "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "get_asset_paths", + "source_code": "def get_asset_paths(self) -> list[str]:\n return []", + "docstring": "Return list of paths for assets (ex. templates, CSS, etc.).", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\__init__.py", + "ast_data": "FunctionDef name:get_asset_paths arg:self arguments arg Return return:no" + }, + { + "library": "django", + "name": "set_limits", + "source_code": "def set_limits(self, low=None, high=None):\n if high is not None:\n if self.high_mark is not None:\n self.high_mark = min(self.high_mark, self.low_mark + high)\n else:\n self.high_mark = self.low_mark + high\n if low is not None:\n if self.high_mark is not None:\n self.low_mark = min(self.high_mark, self.low_mark + low)\n else:\n self.low_mark = self.low_mark + low\n if self.low_mark == self.high_mark:\n self.set_empty()", + "docstring": "Adjust the limits on the rows retrieved. Use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, convert them to the appropriate offset and limit values. Apply any limits passed in here to the existing constraints. 
Add low to the current low value and clamp both to any existing high value.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\query.py", + "ast_data": "FunctionDef name:set_limits arg:self arg:low arg:high arguments arg arg arg If Compare If Compare Assign Call Assign If Compare If Compare Assign Call Assign If Compare Call" + }, + { + "library": "tensorflow", + "name": "_make_window_size_func", + "source_code": "def _make_window_size_func(self, window_size_func):\n\n def window_size_func_wrapper(key):\n return ops.convert_to_tensor(window_size_func(key), dtype=dtypes.int64)\n self._window_size_func = structured_function.StructuredFunctionWrapper(window_size_func_wrapper, self._transformation_name(), input_structure=tensor_spec.TensorSpec([], dtypes.int64))\n if not self._window_size_func.output_structure.is_compatible_with(tensor_spec.TensorSpec([], dtypes.int64)):\n raise ValueError(f'Invalid `window_size_func`. `window_size_func` must return a single `tf.int64` scalar tensor but its return type is {self._window_size_func.output_structure}.')", + "docstring": "Make wrapping defun for window_size_func.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\group_by_window_op.py", + "ast_data": "FunctionDef name:_make_window_size_func arg:self arg:window_size_func arguments arg arg FunctionDef name:window_size_func_wrapper arg:key arguments arg Return return:yes Call Call Assign Call Call Call If Call Call Raise Call" + }, + { + "library": "tensorflow", + "name": "_TensorListScatterGrad", + "source_code": "@ops.RegisterGradient('TensorListScatter')\n@ops.RegisterGradient('TensorListScatterV2')\ndef _TensorListScatterGrad(op: ops.Operation, dlist):\n tensor = op.inputs[0]\n indices = op.inputs[1]\n dtensor = gen_list_ops.tensor_list_gather(dlist, indices, element_shape=array_ops.slice(array_ops.shape(tensor), [1], [-1]), element_dtype=tensor.dtype)\n if op.type == 'TensorListScatterV2':\n return (dtensor, None, None, None)\n else:\n return (dtensor, None, None)", + "docstring": "Gradient function for TensorListScatter.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\list_ops.py", + "ast_data": "FunctionDef name:_TensorListScatterGrad arg:op arg:dlist arguments arg arg Assign Assign Assign Call Call Call If Compare Return return:yes Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "ncols", + "source_code": "@property\ndef ncols(self) -> int:\n return sum((len(a.values) for a in self.values_axes))", + "docstring": "the number of total columns in the values axes", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:ncols arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "dynamic_shapes", + "source_code": "def dynamic_shapes(self, m, args, kwargs=None):\n t_ids = set()\n\n def find_shape(path, t):\n t_id = id(t)\n if t_id in self._shapes:\n t_ids.add(t_id)\n return self._shapes[t_id]\n else:\n return None\n combined_args = _combine_args(m, args, kwargs)\n dynamic_shapes = _tree_map_with_path(find_shape, combined_args)\n if any((t_id not in t_ids for t_id in self._shapes)):\n raise ValueError('Some tensors that were assigned shapes were not found in args. Maybe such tensors were copied when passing them as args? 
Maybe such tensors are contained in classes that were not registered with pytree?')\n return dynamic_shapes", + "docstring": "Generates the :func: pytree structure according to :func: and :func:.", + "type": "method", + "file_path": "pytorch\\torch\\export\\dynamic_shapes.py", + "ast_data": "FunctionDef name:dynamic_shapes arg:self arg:m arg:args arg:kwargs arguments arg arg arg arg Assign Call FunctionDef name:find_shape arg:path arg:t arguments arg arg Assign Call If Compare Call Return return:yes Return return:no Assign Call Assign Call If Call Compare Raise Call Return return:yes" + }, + { + "library": "django", + "name": "new", + "source_code": "def new(self, values=None):\n new_context = copy(self)\n new_context._reset_dicts(values)\n return new_context", + "docstring": "Return a new context with the same properties, but with only the values given in 'values' stored.", + "type": "method", + "file_path": "django\\django\\template\\context.py", + "ast_data": "FunctionDef name:new arg:self arg:values arguments arg arg Assign Call Call Return return:yes" + }, + { + "library": "cryptography", + "name": "__copy__", + "source_code": "@abc.abstractmethod\ndef __copy__(self) -> RSAPrivateKey:\n pass", + "docstring": "Returns a copy.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py", + "ast_data": "FunctionDef name:__copy__ arg:self arguments arg" + }, + { + "library": "pandas", + "name": "_ensure_nanosecond_dtype", + "source_code": "def _ensure_nanosecond_dtype(dtype: DtypeObj) -> None:\n msg = f\"The '{dtype.name}' dtype has no unit. Please pass in '{dtype.name}[ns]' instead.\"\n dtype = getattr(dtype, 'subtype', dtype)\n if not isinstance(dtype, np.dtype):\n pass\n elif dtype.kind in 'mM':\n if not is_supported_dtype(dtype):\n if dtype.name in ['datetime64', 'timedelta64']:\n raise ValueError(msg)\n raise TypeError(f\"dtype={dtype} is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns'\")", + "docstring": "Convert dtypes with granularity less than nanosecond to nanosecond >>> _ensure_nanosecond_dtype(np.dtype(\"M8[us]\")) >>> _ensure_nanosecond_dtype(np.dtype(\"M8[D]\")) Traceback (most recent call last): ... TypeError: dtype=datetime64[D] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns' >>> _ensure_nanosecond_dtype(np.dtype(\"m8[ps]\")) Traceback (most recent call last): ... TypeError: dtype=timedelta64[ps] is not supported. 
Supported resolutions are 's', 'ms', 'us', and 'ns'", + "type": "function", + "file_path": "pandas\\pandas\\core\\dtypes\\cast.py", + "ast_data": "FunctionDef name:_ensure_nanosecond_dtype arg:dtype arguments arg Assign Assign Call If Call If Compare If Call If Compare Raise Call Raise Call" + }, + { + "library": "tensorflow", + "name": "_attach_error_metadata", + "source_code": "def _attach_error_metadata(e, f):\n if hasattr(e, 'ag_pass_through'):\n return\n metadata = getattr(e, 'ag_error_metadata', None)\n source_map = f.ag_source_map\n if metadata is None:\n logging.log(1, 'Caught error in user callable %s', f, exc_info=True)\n message = '{}: {}'.format(e.__class__.__name__, e)\n else:\n message = None\n cause_tb = traceback.extract_tb(sys.exc_info()[2])[1:]\n e.ag_error_metadata = _ErrorMetadata(cause_tb, metadata, message, source_map, __file__)", + "docstring": "Augments an error with the metadata necessary for rewrite.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py", + "ast_data": "FunctionDef name:_attach_error_metadata arg:e arg:f arguments arg arg If Call Return return:no Assign Call Assign If Compare Call Assign Call Assign Assign Call Call Assign Call" + }, + { + "library": "matplotlib", + "name": "intersects_bbox", + "source_code": "def intersects_bbox(self, bbox, filled=True):\n return _path.path_intersects_rectangle(self, bbox.x0, bbox.y0, bbox.x1, bbox.y1, filled)", + "docstring": "Return whether this path intersects a given . If *filled* is True, then this also returns True if the path completely encloses the (i.e., the path is treated as filled). The bounding box is always considered filled.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\path.py", + "ast_data": "FunctionDef name:intersects_bbox arg:self arg:bbox arg:filled arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "ChannelShuffle", + "source_code": "class ChannelShuffle(Module):\n __constants__ = ['groups']\n groups: int\n\n def __init__(self, groups: int) -> None:\n super().__init__()\n self.groups = groups\n\n def forward(self, input: Tensor) -> Tensor:\n return F.channel_shuffle(input, self.groups)\n\n def extra_repr(self) -> str:\n return f'groups={self.groups}'", + "docstring": "Divides and rearranges the channels in a tensor. This operation divides the channels in a tensor of shape :math: into g groups as :math: and shuffles them, while retaining the original tensor shape in the final output. Args: groups (int): number of groups to divide channels in. 
Examples:: >>> channel_shuffle = nn.ChannelShuffle(2) >>> input = torch.arange(1, 17, dtype=torch.float32).view(1, 4, 2, 2) >>> input tensor([[[[ 1., 2.], [ 3., 4.]], [[ 5., 6.], [ 7., 8.]], [[ 9., 10.], [11., 12.]], [[13., 14.], [15., 16.]]]]) >>> output = channel_shuffle(input) >>> output tensor([[[[ 1., 2.], [ 3., 4.]], [[ 9., 10.], [11., 12.]], [[ 5., 6.], [ 7., 8.]], [[13., 14.], [15., 16.]]]])", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\channelshuffle.py", + "ast_data": "ClassDef name:ChannelShuffle Assign FunctionDef name:__init__ arg:self arg:groups arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_use_resource", + "source_code": "def set_use_resource(self, use_resource):\n if context.executing_eagerly() and (not use_resource):\n raise ValueError('When eager execution is enabled, use_resource cannot be set to false.')\n self._use_resource = use_resource", + "docstring": "Sets whether to use ResourceVariables for this scope.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py", + "ast_data": "FunctionDef name:set_use_resource arg:self arg:use_resource arguments arg arg If BoolOp Call Raise Call Assign" + }, + { + "library": "tensorflow", + "name": "_has_precomputed_nvals", + "source_code": "def _has_precomputed_nvals(self):\n return self._nvals is not None", + "docstring": "Returns true if has already been computed. If true, then will return its value without calling any TensorFlow ops.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:_has_precomputed_nvals arg:self arguments arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "dataset", + "source_code": "def dataset(directory, images_file, labels_file):\n images_file = download(directory, images_file)\n labels_file = download(directory, labels_file)\n check_image_file_header(images_file)\n check_labels_file_header(labels_file)\n\n def decode_image(image):\n image = tf.io.decode_raw(image, tf.uint8)\n image = tf.cast(image, tf.float32)\n image = tf.reshape(image, [784])\n return image / 255.0\n\n def decode_label(label):\n label = tf.io.decode_raw(label, tf.uint8)\n label = tf.reshape(label, [])\n return tf.cast(label, tf.int32)\n images = tf.data.FixedLengthRecordDataset(images_file, 28 * 28, header_bytes=16).map(decode_image)\n labels = tf.data.FixedLengthRecordDataset(labels_file, 1, header_bytes=8).map(decode_label)\n return tf.data.Dataset.zip((images, labels))", + "docstring": "Download and parse MNIST dataset.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\tutorials\\dataset.py", + "ast_data": "FunctionDef name:dataset arg:directory arg:images_file arg:labels_file arguments arg arg arg Assign Call Assign Call Call Call FunctionDef name:decode_image arg:image arguments arg Assign Call Assign Call Assign Call Return return:yes FunctionDef name:decode_label arg:label arguments arg Assign Call Assign Call Return return:yes Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_do_step", + "source_code": "def _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha):\n x = x + alpha * d_x\n tau = tau + alpha * d_tau\n z = z + alpha * d_z\n kappa = kappa + alpha * d_kappa\n y = y + alpha * 
d_y\n return (x, y, z, tau, kappa)", + "docstring": "An implementation of [4] Equation 8.9 References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. \"The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm.\" High performance optimization. Springer US, 2000. 197-232.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_linprog_ip.py", + "ast_data": "FunctionDef name:_do_step arg:x arg:y arg:z arg:tau arg:kappa arg:d_x arg:d_y arg:d_z arg:d_tau arg:d_kappa arg:alpha arguments arg arg arg arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "step", + "source_code": "def step(self) -> None:\n for scheduler in self._schedulers:\n scheduler.step()\n self._last_lr = [group['lr'] for group in self._schedulers[-1].optimizer.param_groups]", + "docstring": "Perform a step.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "FunctionDef name:step arg:self arguments arg For Call Assign" + }, + { + "library": "tensorflow", + "name": "_is_gputrace_device", + "source_code": "def _is_gputrace_device(self, device_name: str) -> bool:\n return '/stream:' in device_name or '/memcpy' in device_name", + "docstring": "Returns true if this device is part of the GPUTracer logging.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py", + "ast_data": "FunctionDef name:_is_gputrace_device arg:self arg:device_name arguments arg arg Return return:yes BoolOp Compare Compare" + }, + { + "library": "scipy", + "name": "_logpdf", + "source_code": "def _logpdf(self, x, mean, cov_object):\n log_det_cov, rank = (cov_object.log_pdet, cov_object.rank)\n dev = x - mean\n if dev.ndim > 1:\n log_det_cov = log_det_cov[..., np.newaxis]\n rank = rank[..., np.newaxis]\n maha = np.sum(np.square(cov_object.whiten(dev)), axis=-1)\n return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)", + "docstring": "Log of the multivariate normal probability density function. Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function mean : ndarray Mean of the distribution cov_object : Covariance An object representing the Covariance matrix Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:_logpdf arg:self arg:x arg:mean arg:cov_object arguments arg arg arg arg Assign Assign If Compare Assign Assign Assign Call Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "add_decomposed_rel_pos", + "source_code": "def add_decomposed_rel_pos(attn: Tensor, q: Tensor, rel_pos_h: Tensor, rel_pos_w: Tensor, q_size: tuple[int, int], k_size: tuple[int, int]) -> Tensor:\n q_h, q_w = q_size\n k_h, k_w = k_size\n Rh = get_rel_pos(q_h, k_h, rel_pos_h)\n Rw = get_rel_pos(q_w, k_w, rel_pos_w)\n B, _, dim = q.shape\n r_q = q.reshape(B, q_h, q_w, dim)\n rel_h = torch.einsum('bhwc,hkc->bhwk', r_q, Rh)\n rel_w = torch.einsum('bhwc,wkc->bhwk', r_q, Rw)\n attn = (attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]).view(B, q_h * q_w, k_h * k_w)\n return attn", + "docstring": "Calculate decomposed Relative Positional Embeddings. from :paper:. Args: attn: attention map. q: query q in the attention layer with shape (B, q_h * q_w, C). 
rel_pos_h: relative position embeddings (Lh, C) for height axis. rel_pos_w: relative position embeddings (Lw, C) for width axis. q_size: spatial sequence size of query q with (q_h, q_w). k_size: spatial sequence size of key k with (k_h, k_w). Returns: att: attention map with added relative positional embeddings.", + "type": "function", + "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\image_encoder.py", + "ast_data": "FunctionDef name:add_decomposed_rel_pos arg:attn arg:q arg:rel_pos_h arg:rel_pos_w arg:q_size arg:k_size arguments arg arg arg arg arg arg Assign Assign Assign Call Assign Call Assign Assign Call Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "authlib", + "name": "register_token_generator", + "source_code": "def register_token_generator(self, grant_type, func):\n self._token_generators[grant_type] = func", + "docstring": "Register a function as token generator for the given ``:: def generate_bearer_token( grant_type, client, user=None, scope=None, expires_in=None, include_refresh_token=True, ): token = {\"token_type\": \"Bearer\", \"access_token\": ...} if include_refresh_token: token[\"refresh_token\"] = ... ... return token authorization_server.register_token_generator( \"default\", generate_bearer_token ) If you register a generator for a certain grant type, that generator will only works for the given grant type:: authorization_server.register_token_generator( \"client_credentials\", generate_bearer_token, ) :param grant_type: string name of the grant type :param func: a function to generate token", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py", + "ast_data": "FunctionDef name:register_token_generator arg:self arg:grant_type arg:func arguments arg arg arg Assign" + }, + { + "library": "django", + "name": "subwidgets", + "source_code": "def subwidgets(self, name, value, attrs=None):\n value = self.format_value(value)\n yield from self.options(name, value, attrs)", + "docstring": "Yield all \"subwidgets\" of this widget. 
Used to enable iterating options from a BoundField for choice widgets.", + "type": "method", + "file_path": "django\\django\\forms\\widgets.py", + "ast_data": "FunctionDef name:subwidgets arg:self arg:name arg:value arg:attrs arguments arg arg arg arg Assign Call Call" + }, + { + "library": "pytorch", + "name": "modules_to_mkldnn", + "source_code": "def modules_to_mkldnn(nodes: list[fx.Node], modules: dict[str, nn.Module]):\n old_modules: dict[nn.Module, nn.Module] = {}\n for node in nodes:\n if node.op == 'call_module':\n assert isinstance(node.target, str)\n cur_module = modules[node.target]\n if type(cur_module) in mkldnn_map:\n new_module = mkldnn_map[type(cur_module)](cur_module, torch.float)\n assert isinstance(new_module, nn.Module)\n old_modules[new_module] = copy.deepcopy(cur_module)\n replace_node_module(node, modules, new_module)\n return old_modules", + "docstring": "For each node, if it's a module that can be preconverted into MKLDNN, then we do so and create a mapping to allow us to convert from the MKLDNN version of the module to the original.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\optimization.py", + "ast_data": "FunctionDef name:modules_to_mkldnn arg:nodes arg:modules arguments arg arg For If Compare Call Assign If Compare Call Assign Call Call Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_set_pattern_complex_format", + "source_code": "def _set_pattern_complex_format(self, pattern: Pattern) -> BackendPatternConfig:\n if self.pattern is not None:\n raise ValueError(\"Only one of 'pattern' or 'pattern_complex_format' can be set\")\n self._pattern_complex_format = pattern\n return self", + "docstring": "Set the pattern to configure, using the reversed nested tuple format. See the BackendConfig README for more detail:", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py", + "ast_data": "FunctionDef name:_set_pattern_complex_format arg:self arg:pattern arguments arg arg If Compare Raise Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_from_sequence_of_strings", + "source_code": "@classmethod\ndef _from_sequence_of_strings(cls, strings, *, dtype: ExtensionDtype, copy: bool=False) -> Self:\n raise AbstractMethodError(cls)", + "docstring": "Construct a new ExtensionArray from a sequence of strings. Parameters ---------- strings : Sequence Each element will be an instance of the scalar type for this array, ``. dtype : ExtensionDtype Construct for this particular dtype. This should be a Dtype compatible with the ExtensionArray. copy : bool, default False If True, copy the underlying data. Returns ------- ExtensionArray See Also -------- api.extensions.ExtensionArray._from_sequence : Construct a new ExtensionArray from a sequence of scalars. api.extensions.ExtensionArray._from_factorized : Reconstruct an ExtensionArray after factorization. api.extensions.ExtensionArray._from_scalars : Strict analogue to _from_sequence, allowing only sequences of scalars that should be specifically inferred to the given dtype. Examples -------- >>> pd.arrays.IntegerArray._from_sequence_of_strings( ... [\"1\", \"2\", \"3\"], dtype=pd.Int64Dtype() ... 
) [1, 2, 3] Length: 3, dtype: Int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:_from_sequence_of_strings arg:cls arg:strings arguments arg arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_overload_operator", + "source_code": "@classmethod\ndef _overload_operator(cls, tensor_class, operator):\n tensor_oper = getattr(tensor_class, operator)\n tensor_oper = getattr(tensor_oper, '__func__', tensor_oper)\n setattr(cls, operator, tensor_oper)", + "docstring": "Overload an operator with the same implementation as a base Tensor class. We pull the operator out of the class dynamically to avoid ordering issues. Args: tensor_class: The (Composite)Tensor to get the method from. operator: string. The operator name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py", + "ast_data": "FunctionDef name:_overload_operator arg:cls arg:tensor_class arg:operator arguments arg arg arg Assign Call Assign Call Call" + }, + { + "library": "numpy", + "name": "as_array", + "source_code": "@set_module('numpy.ctypeslib')\ndef as_array(obj, shape=None):\n if isinstance(obj, ctypes._Pointer):\n if shape is None:\n raise TypeError('as_array() requires a shape argument when called on a pointer')\n p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape))\n obj = ctypes.cast(obj, p_arr_type).contents\n return np.asarray(obj)", + "docstring": "Create a numpy array from a ctypes array or POINTER. The numpy array shares the memory with the ctypes object. The shape parameter must be given if converting from a ctypes POINTER. The shape parameter is ignored if converting from a ctypes array Examples -------- Converting a ctypes integer array: >>> import ctypes >>> ctypes_array = (ctypes.c_int * 5)(0, 1, 2, 3, 4) >>> np_array = np.ctypeslib.as_array(ctypes_array) >>> np_array array([0, 1, 2, 3, 4], dtype=int32) Converting a ctypes POINTER: >>> import ctypes >>> buffer = (ctypes.c_int * 5)(0, 1, 2, 3, 4) >>> pointer = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_int)) >>> np_array = np.ctypeslib.as_array(pointer, (5,)) >>> np_array array([0, 1, 2, 3, 4], dtype=int32)", + "type": "function", + "file_path": "numpy\\numpy\\ctypeslib\\_ctypeslib.py", + "ast_data": "FunctionDef name:as_array arg:obj arg:shape arguments arg arg If Call If Compare Raise Call Assign Call Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n _validate_kwargs(self.__class__.__name__, kwargs)\n dtype = _assert_float_dtype(_get_dtype(dtype))\n scale = self.scale\n fan_in, fan_out = _compute_fans(shape)\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n if self.mode == 'fan_in':\n scale /= max(1.0, fan_in)\n elif self.mode == 'fan_out':\n scale /= max(1.0, fan_out)\n else:\n scale /= max(1.0, (fan_in + fan_out) / 2.0)\n if self.distribution == 'truncated_normal':\n stddev = math.sqrt(scale) / 0.8796256610342398\n return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype)\n elif self.distribution == 'untruncated_normal':\n stddev = math.sqrt(scale)\n return self._random_generator.random_normal(shape, 0.0, stddev, dtype)\n else:\n limit = math.sqrt(3.0 * scale)\n return self._random_generator.random_uniform(shape, -limit, limit, dtype)", + "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. 
dtype: Optional dtype of the tensor. Only floating point types are supported. If not specified, is used, which default to unless you configured it otherwise (via ) **kwargs: Additional keyword arguments.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call Call Assign Assign Call If Compare Assign If Compare Call If Compare Call Call If Compare Assign Call Return return:yes Call If Compare Assign Call Return return:yes Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "delete", + "source_code": "def delete(self, target: function_type.FunctionType) -> None:\n if target in self._dispatch_table:\n del self._dispatch_table[target]\n for request in list(self._dispatch_cache.keys()):\n if self._dispatch_cache[request] == target:\n del self._dispatch_cache[request]", + "docstring": "Deletes a target in the table if it exists.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\type_dispatch.py", + "ast_data": "FunctionDef name:delete arg:self arg:target arguments arg arg If Compare For Call Call If Compare" + }, + { + "library": "tensorflow", + "name": "BatchableExtensionType", + "source_code": "@tf_export('experimental.BatchableExtensionType')\nclass BatchableExtensionType(ExtensionType):\n _tf_extension_type_do_not_transform_this_class = True", + "docstring": "An ExtensionType that can be batched and unbatched. s can be used with APIs that require batching or unbatching, including , , and . E.g.: >>> class Vehicle(tf.experimental.BatchableExtensionType): ... top_speed: tf.Tensor ... mpg: tf.Tensor >>> batch = Vehicle([120, 150, 80], [30, 40, 12]) >>> tf.map_fn(lambda vehicle: vehicle.top_speed * vehicle.mpg, batch, ... fn_output_signature=tf.int32).numpy() array([3600, 6000, 960], dtype=int32) An is used by these APIs to encode values. The default encoder assumes that values can be stacked, unstacked, or concatenated by simply stacking, unstacking, or concatenating every nested , , , or field. Extension types where this is not the case will need to override with a custom . 
See for more details.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py", + "ast_data": "ClassDef name:BatchableExtensionType Assign Call" + }, + { + "library": "tensorflow", + "name": "UserRegisteredSpec", + "source_code": "class UserRegisteredSpec(type_spec_module.TypeSpec):\n\n def __init__(self, shape, dtype):\n self.shape = shape\n self._dtype = dtype\n self.dtype = dtype\n\n def _component_specs(self):\n raise NotImplementedError\n\n def _from_components(self, components):\n raise NotImplementedError\n\n def _serialize(self):\n raise NotImplementedError\n\n def _to_components(self, value):\n raise NotImplementedError\n\n def value_type(self):\n raise NotImplementedError", + "docstring": "TypeSpec to represent user-registered symbolic objects.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py", + "ast_data": "ClassDef name:UserRegisteredSpec FunctionDef name:__init__ arg:self arg:shape arg:dtype arguments arg arg arg Assign Assign Assign FunctionDef name:_component_specs arg:self arguments arg Raise FunctionDef name:_from_components arg:self arg:components arguments arg arg Raise FunctionDef name:_serialize arg:self arguments arg Raise FunctionDef name:_to_components arg:self arg:value arguments arg arg Raise FunctionDef name:value_type arg:self arguments arg Raise" + }, + { + "library": "django", + "name": "disable_constraint_checking", + "source_code": "def disable_constraint_checking(self):\n with self.cursor() as cursor:\n cursor.execute('SET foreign_key_checks=0')\n return True", + "docstring": "Disable foreign key checks, primarily for use in adding rows with forward references. Always return True to indicate constraint checks need to be re-enabled.", + "type": "method", + "file_path": "django\\django\\db\\backends\\mysql\\base.py", + "ast_data": "FunctionDef name:disable_constraint_checking arg:self arguments arg With Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "with_extremes", + "source_code": "def with_extremes(self, *, bad=None, under=None, over=None):\n new_cm = self.copy()\n if bad is not None:\n new_cm._rgba_bad = to_rgba(bad)\n if under is not None:\n if not np.iterable(under) or len(under) != len(new_cm):\n raise ValueError(f'*under* must contain a color for each scalar colormap i.e. be of length {len(new_cm)}.')\n else:\n for c, b in zip(new_cm, under):\n c.set_under(b)\n if over is not None:\n if not np.iterable(over) or len(over) != len(new_cm):\n raise ValueError(f'*over* must contain a color for each scalar colormap i.e. be of length {len(new_cm)}.')\n else:\n for c, b in zip(new_cm, over):\n c.set_over(b)\n return new_cm", + "docstring": "Return a copy of the with modified out-of-range attributes. The *bad* keyword modifies the copied while *under* and *over* modifies the attributes of the copied component colormaps. Note that *under* and *over* colors are subject to the mixing rules determined by the *combination_mode*. Parameters ---------- bad: :mpltype:, default: None If Matplotlib color, the bad value is set accordingly in the copy under tuple of :mpltype:, default: None If tuple, the value of each component is set with the values from the tuple. over tuple of :mpltype:, default: None If tuple, the value of each component is set with the values from the tuple. 
Returns ------- MultivarColormap copy of self with attributes set", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:with_extremes arg:self arguments arg arg arg arg Assign Call If Compare Assign Call If Compare If BoolOp Call Compare Call Call Raise Call Call For Call Call If Compare If BoolOp Call Compare Call Call Raise Call Call For Call Call Return return:yes" + }, + { + "library": "django", + "name": "info", + "source_code": "@property\ndef info(self):\n return capi.get_ds_info(self.ptr, None).decode()", + "docstring": "Return information about this raster in a string format equivalent to the output of the gdalinfo command line utility.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py", + "ast_data": "FunctionDef name:info arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "to_proto", + "source_code": "def to_proto(self) -> function_type_pb2.Parameter:\n serialized_type_constraint = serialization.serialize(self.type_constraint) if self.type_constraint else None\n return function_type_pb2.Parameter(name=self.name, kind=PY_TO_PROTO_ENUM[self.kind], is_optional=self.optional, type_constraint=serialized_type_constraint)", + "docstring": "Generate a proto representation of the Parameter.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py", + "ast_data": "FunctionDef name:to_proto arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n _check_is_fitted(self)\n estimator = getattr(self, 'estimator_', self.estimator)\n y_score, _, response_method_used = _get_response_values_binary(estimator, X, self._get_response_method(), pos_label=self.pos_label, return_response_method_used=True)\n if self.threshold == 'auto':\n decision_threshold = 0.5 if response_method_used == 'predict_proba' else 0.0\n else:\n decision_threshold = self.threshold\n return _threshold_scores_to_class_labels(y_score, decision_threshold, self.classes_, self.pos_label)", + "docstring": "Predict the target of new samples. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The samples, as accepted by . Returns ------- class_labels : ndarray of shape (n_samples,) The predicted class.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Assign Call Call If Compare Assign Compare Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "istensor", + "source_code": "def istensor(obj):\n tensor_list: tuple[type, ...] 
= (torch.Tensor, torch.nn.Parameter, *config.traceable_tensor_subclasses)\n tensor_list = tensor_list + (torch._subclasses.FakeTensor,)\n return istype(obj, tensor_list)", + "docstring": "Check of obj is a tensor", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:istensor arg:obj arguments arg Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "reverse_dict", + "source_code": "def reverse_dict(d):\n result = {}\n for key in d:\n for val in d[key]:\n result[val] = result.get(val, ()) + (key,)\n return result", + "docstring": "Reverses direction of dependence dict >>> d = {\"a\": (1, 2), \"b\": (2, 3), \"c\": ()} >>> reverse_dict(d) # doctest: +SKIP {1: ('a',), 2: ('a', 'b'), 3: ('b',)} :note: dict order are not deterministic. As we iterate on the input dict, it make the output of this function depend on the dict order. So this function output order should be considered as undeterministic.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\utils.py", + "ast_data": "FunctionDef name:reverse_dict arg:d arguments arg Assign For For Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "transform_feature", + "source_code": "def transform_feature(self, transformation_cache, state_manager):\n input_tensor = _to_sparse_input_and_drop_ignore_values(transformation_cache.get(self.key, state_manager))\n return self._transform_input_tensor(input_tensor, state_manager)", + "docstring": "Creates a lookup table for the vocabulary list.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "trimseq", + "source_code": "def trimseq(seq):\n if len(seq) == 0 or seq[-1] != 0:\n return seq\n else:\n for i in range(len(seq) - 1, -1, -1):\n if seq[i] != 0:\n break\n return seq[:i + 1]", + "docstring": "Remove small Poly series coefficients. Parameters ---------- seq : sequence Sequence of Poly series coefficients. Returns ------- series : sequence Subsequence with trailing zeros removed. If the resulting sequence would be empty, return the first element. The returned sequence may or may not be a view. Notes ----- Do not lose the type info if the sequence contains unknown objects.", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\polyutils.py", + "ast_data": "FunctionDef name:trimseq arg:seq arguments arg If BoolOp Compare Call Compare Return return:yes For Call Call If Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "accelerator_type", + "source_code": "def accelerator_type(self):\n return self._get_tpu_property('acceleratorType')", + "docstring": "Return accelerator type of the TPU.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py", + "ast_data": "FunctionDef name:accelerator_type arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "mish", + "source_code": "def mish(input: Tensor, inplace: bool=False) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(mish, (input,), input, inplace=inplace)\n if inplace:\n return torch._C._nn.mish_(input)\n return torch._C._nn.mish(input)", + "docstring": "Apply the Mish function, element-wise. 
Mish: A Self Regularized Non-Monotonic Neural Activation Function. .. math:: \\text{Mish}(x) = x * \\text{Tanh}(\\text{Softplus}(x)) .. note:: See _ See :class: for more details.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:mish arg:input arg:inplace arguments arg arg If Call Return return:yes Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_or_make_slot_with_initializer", + "source_code": "def _get_or_make_slot_with_initializer(self, var, initializer, shape, dtype, slot_name, op_name):\n named_slots = self._slot_dict(slot_name)\n if _var_key(var) not in named_slots:\n new_slot_variable = slot_creator.create_slot_with_initializer(var, initializer, shape, dtype, op_name, copy_xla_sharding=True)\n self._restore_slot_variable(slot_name=slot_name, variable=var, slot_variable=new_slot_variable)\n named_slots[_var_key(var)] = new_slot_variable\n return named_slots[_var_key(var)]", + "docstring": "Find or create a slot for a variable, using an Initializer. Args: var: A object. initializer: An . The initial value of the slot. shape: Shape of the initial value of the slot. dtype: Type of the value of the slot. slot_name: Name for the slot. op_name: Name to use when scoping the Variable that needs to be created for the slot. Returns: A object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_get_or_make_slot_with_initializer arg:self arg:var arg:initializer arg:shape arg:dtype arg:slot_name arg:op_name arguments arg arg arg arg arg arg arg Assign Call If Compare Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_constant_impl", + "source_code": "def _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast) -> Union[ops.Operation, ops._EagerTensorBase]:\n ctx = context.context()\n if ctx.executing_eagerly():\n if trace.enabled:\n with trace.Trace('tf.constant'):\n return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)\n return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)\n const_tensor = ops._create_graph_constant(value, dtype, shape, name, verify_shape, allow_broadcast)\n return const_tensor", + "docstring": "Implementation of constant.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py", + "ast_data": "FunctionDef name:_constant_impl arg:value arg:dtype arg:shape arg:name arg:verify_shape arg:allow_broadcast arguments arg arg arg arg arg arg Assign Call If Call If With Call Return return:yes Call Return return:yes Call Assign Call Return return:yes" + }, + { + "library": "scrapy", + "name": "jmespath", + "source_code": "def jmespath(self, *a: Any, **kw: Any) -> SelectorList:\n raise NotSupported(\"Response content isn't text\")", + "docstring": "Shortcut method implemented only by responses whose content is text (subclasses of TextResponse).", + "type": "method", + "file_path": "scrapy\\scrapy\\http\\response\\__init__.py", + "ast_data": "FunctionDef name:jmespath arg:self arguments arg arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "_finalize_params", + "source_code": "@no_type_check\ndef _finalize_params(state: _FSDPState) -> None:\n handle = state._handle\n if not handle:\n return\n flat_param = handle.flat_param\n if torch.distributed._functional_collectives.is_torchdynamo_compiling():\n if hasattr(flat_param, 
'_post_backward_hook_handle'):\n pbhs_handle = flat_param._post_backward_hook_handle\n pbhs_handle.remove()\n del flat_param._post_backward_hook_handle\n elif hasattr(flat_param, '_post_backward_hook_state'):\n post_backward_hook_state_len = len(flat_param._post_backward_hook_state)\n expected_post_backward_hook_state_len = int(flat_param.requires_grad) + 1\n _p_assert(post_backward_hook_state_len == expected_post_backward_hook_state_len, f'Invalid: ``_post_backward_hook_state``: {flat_param._post_backward_hook_state}')\n flat_param._post_backward_hook_state[-1].remove()\n delattr(flat_param, '_post_backward_hook_state')\n if flat_param.requires_grad:\n if not state._sync_gradients:\n return\n if not handle._has_optim_in_backward:\n handle.prepare_gradient_for_optim()\n _p_assert(hasattr(flat_param, '_post_backward_called'), 'Expects `_post_backward_called` to be set on the `FlatParameter`')\n flat_param._post_backward_called = False", + "docstring": "Finalizes the parameters before the next iteration.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_finalize_params arg:state arguments arg Assign If Return return:no Assign If Call If Call Assign Call If Call Assign Call Assign Call Call Compare Call Call If If Return return:no If Call Call Call Assign" + }, + { + "library": "pytorch", + "name": "sym_float", + "source_code": "@onnx_impl(torch.sym_float, trace_only=True)\ndef sym_float(self: TensorType) -> FLOAT:\n return op.Cast(self, to=FLOAT.dtype)", + "docstring": "sym_float(SymInt self) -> SymFloat", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_torchlib\\ops\\symops.py", + "ast_data": "FunctionDef name:sym_float arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "update_obs_for_equalization", + "source_code": "def update_obs_for_equalization(model: GraphModule, modules: dict[str, nn.Module]) -> dict[str, _WeightEqualizationObserver]:\n weight_eq_obs_dict = {}\n for node in model.graph.nodes:\n if node.op == 'call_module' and isinstance(modules[node.target], _InputEqualizationObserver):\n input_eq_obs = modules[node.target]\n assert isinstance(input_eq_obs, _InputEqualizationObserver)\n op_node, weight_eq_obs = get_op_node_and_weight_eq_obs(node, model, modules)\n if op_node is None or weight_eq_obs is None:\n continue\n if op_node.op == 'call_module':\n if fused_module_supports_equalization(modules[str(op_node.target)]):\n module = modules[str(op_node.target)][0]\n assert nn_module_supports_equalization(module)\n weight_eq_obs(module.weight)\n else:\n weight_eq_obs(modules[str(op_node.target)].weight)\n equalization_scale = calculate_equalization_scale(input_eq_obs, weight_eq_obs)\n input_eq_obs.set_equalization_scale(equalization_scale)\n weight_eq_obs.set_equalization_scale(equalization_scale)\n weight_eq_obs_dict[op_node.name] = weight_eq_obs\n return weight_eq_obs_dict", + "docstring": "Update all of the observer's equalization scale. For each InputEqualizationObserver, we will find the location of the next WeightEqualizationObserver, create it, and calculate the equalization scale based on the two observers. 
We will then return a dictionary mapping operation node names to the corresponding WeightEqualizationObservers for that operation.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", + "ast_data": "FunctionDef name:update_obs_for_equalization arg:model arg:modules arguments arg arg Assign For If BoolOp Compare Call Assign Call Assign Call If BoolOp Compare Compare If Compare If Call Call Assign Call Call Call Call Call Assign Call Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_without_tensor_names", + "source_code": "def _without_tensor_names(self) -> 'TypeSpec':\n\n def rename(value):\n if isinstance(value, TypeSpec):\n return value._without_tensor_names()\n return value\n return self._deserialize(nest.map_structure(rename, self._serialize()))", + "docstring": "Returns a TypeSpec compatible with , with tensor names removed. Returns: A that is compatible with , where the name of any is set to .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", + "ast_data": "FunctionDef name:_without_tensor_names arg:self arguments arg FunctionDef name:rename arg:value arguments arg If Call Return return:yes Call Return return:yes Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "ndim", + "source_code": "def ndim(obj):\n return np.ndim(getdata(obj))", + "docstring": "maskedarray version of the numpy function.", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:ndim arg:obj arguments arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "union", + "source_code": "def union(self, other) -> FrozenList:\n if isinstance(other, tuple):\n other = list(other)\n return type(self)(super().__add__(other))", + "docstring": "Returns a FrozenList with other concatenated to the end of self. Parameters ---------- other : array-like The array-like whose elements we are concatenating. Returns ------- FrozenList The collection difference between self and other.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\frozen.py", + "ast_data": "FunctionDef name:union arg:self arg:other arguments arg arg If Call Assign Call Return return:yes Call Call Call Call" + }, + { + "library": "pytorch", + "name": "WorkerState", + "source_code": "class WorkerState(str, Enum):\n UNKNOWN = 'UNKNOWN'\n INIT = 'INIT'\n HEALTHY = 'HEALTHY'\n UNHEALTHY = 'UNHEALTHY'\n STOPPED = 'STOPPED'\n SUCCEEDED = 'SUCCEEDED'\n FAILED = 'FAILED'\n\n @staticmethod\n def is_running(state: 'WorkerState') -> bool:\n return state in {WorkerState.HEALTHY, WorkerState.UNHEALTHY}", + "docstring": "A state of the `` state and is better off self terminating and allowing the job manager to retry the node.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py", + "ast_data": "ClassDef name:WorkerState Assign Assign Assign Assign Assign Assign Assign FunctionDef name:is_running arg:state arguments arg Return return:yes Compare" + }, + { + "library": "matplotlib", + "name": "barh", + "source_code": "@_docstring.interpd\ndef barh(self, y, width, height=0.8, left=None, *, align='center', data=None, **kwargs):\n kwargs.setdefault('orientation', 'horizontal')\n patches = self.bar(x=left, height=height, width=width, bottom=y, align=align, data=data, **kwargs)\n return patches", + "docstring": "Make a horizontal bar plot. The bars are positioned at *y* with the given *align*\\ment. 
Their dimensions are given by *width* and *height*. The horizontal baseline is *left* (default 0). Many parameters can take either a single value applying to all bars or a sequence of values, one for each bar. Parameters ---------- y : float or array-like The y coordinates of the bars. See also *align* for the alignment of the bars to the coordinates. Bars are often used for categorical data, i.e. string labels below the bars. You can provide a list of strings directly to *y*. `.BarContainercolorcolorcolorcolor.BarContainer/gallery/statistics/errorbar_featurescolorcolorerrorbar.capsize~.Axes.errorbar.Rectangle/gallery/lines_bars_and_markers/horizontal_barchart_distribution`.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", + "ast_data": "FunctionDef name:barh arg:self arg:y arg:width arg:height arg:left arguments arg arg arg arg arg arg arg arg Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "idealfourths", + "source_code": "def idealfourths(data, axis=None):\n\n def _idf(data):\n x = data.compressed()\n n = len(x)\n if n < 3:\n return [np.nan, np.nan]\n j, h = divmod(n / 4.0 + 5 / 12.0, 1)\n j = int(j)\n qlo = (1 - h) * x[j - 1] + h * x[j]\n k = n - j\n qup = (1 - h) * x[k] + h * x[k - 1]\n return [qlo, qup]\n data = ma.sort(data, axis=axis).view(MaskedArray)\n if axis is None:\n return _idf(data)\n else:\n return ma.apply_along_axis(_idf, axis, data)", + "docstring": "Returns an estimate of the lower and upper quartiles. Uses the ideal fourths algorithm. Parameters ---------- data : array_like Input array. axis : int, optional Axis along which the quartiles are estimated. If None, the arrays are flattened. Returns ------- idealfourths : {list of floats, masked array} Returns the two internal values that divide into four parts using the ideal fourths algorithm either along the flattened array (if is None) or along of .", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_extras.py", + "ast_data": "FunctionDef name:idealfourths arg:data arg:axis arguments arg arg FunctionDef name:_idf arg:data arguments arg Assign Call Assign Call If Compare Return return:yes Assign Call Assign Call Assign Assign Assign Return return:yes Assign Call Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "ordered", + "source_code": "@property\ndef ordered(self):\n if isinstance(self, EmptyQuerySet):\n return True\n if self.query.extra_order_by or self.query.order_by:\n return True\n elif self.query.default_ordering and self.query.get_meta().ordering and (not self.query.group_by):\n return True\n else:\n return False", + "docstring": "Return True if the QuerySet is ordered -- i.e. 
has an order_by() clause or a default ordering on the model (or is empty).", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:ordered arg:self arguments arg If Call Return return:yes If BoolOp Return return:yes If BoolOp Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "_tile_description_to_json", + "source_code": "@classmethod\ndef _tile_description_to_json(cls, tile_desc):\n if tile_desc is None:\n return None\n math_instruction_dict = None\n if hasattr(tile_desc, 'math_instruction') and tile_desc.math_instruction is not None:\n math_instruction = tile_desc.math_instruction\n math_instruction_dict = {'instruction_shape': math_instruction.instruction_shape, 'element_a': cls._enum_to_json(math_instruction.element_a), 'element_b': cls._enum_to_json(math_instruction.element_b), 'element_accumulator': cls._enum_to_json(math_instruction.element_accumulator), 'opcode_class': cls._enum_to_json(math_instruction.opcode_class), 'math_operation': cls._enum_to_json(math_instruction.math_operation)}\n if hasattr(math_instruction, 'element_scale_factor') and math_instruction.element_scale_factor is not None:\n math_instruction_dict['element_scale_factor'] = cls._enum_to_json(math_instruction.element_scale_factor)\n result = {'threadblock_shape': tile_desc.threadblock_shape, 'stages': tile_desc.stages, 'warp_count': tile_desc.warp_count, 'math_instruction': math_instruction_dict, 'min_compute': tile_desc.minimum_compute_capability, 'max_compute': tile_desc.maximum_compute_capability, 'cluster_shape': tile_desc.cluster_shape, 'explicit_vector_sizes': tile_desc.explicit_vector_sizes}\n if hasattr(tile_desc, 'tile_shape') and tile_desc.tile_shape != tile_desc.threadblock_shape:\n result['tile_shape'] = tile_desc.tile_shape\n return result", + "docstring": "Convert TileDescription to JSON dict. 
Args: tile_desc: TileDescription object Returns: dict: Dictionary representation", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\serialization.py", + "ast_data": "FunctionDef name:_tile_description_to_json arg:cls arg:tile_desc arguments arg arg If Compare Return return:no Assign If BoolOp Call Compare Assign Assign Call Call Call Call Call If BoolOp Call Compare Assign Call Assign If BoolOp Call Compare Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_params", + "source_code": "def set_params(self, numticks=None, presets=None):\n if presets is not None:\n self.presets = presets\n if numticks is not None:\n self.numticks = numticks", + "docstring": "Set parameters within this locator.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:set_params arg:self arg:numticks arg:presets arguments arg arg arg If Compare Assign If Compare Assign" + }, + { + "library": "django", + "name": "__str__", + "source_code": "def __str__(self):\n return str(self.srs)", + "docstring": "Return the string representation, a 'pretty' OGC WKT.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py", + "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_offload_state_dict_to_cpu", + "source_code": "def _offload_state_dict_to_cpu(state_dict: dict[str, Any], *, ranks_only: tuple[int, ...]=(), type_check: bool=True) -> dict[str, Any]:\n ret = _iterate_state_dict(state_dict, _identity_func, _identity_func, _identity_func, pg=None, device=None, cpu_offload=True, ranks_only=ranks_only, type_check=type_check)\n return ret", + "docstring": "Given a state_dict, this API offload all the tensors to CPU memory. Args: state_dict (Dict[str, Any]): the target state_dict. pg (Optional[dist.ProcessGroup]): the process group that is used to gather ShardedTensor. Note that gathering a DTensor will use the DeviceMesh. So this argument will be ignored when gathering a DTensor. ranks_only: (Tuple[int, ...]): if this tuple is empty, all ranks will have the same state_dicts. Otherwise only ranks that in `` have the same state_dicts. Other ranks will get empty state_dicts. type_check: (bool): check if the instance data type is a supported type that can be saved by DCP. The current supported data types are torch.Tensor, DTensor, int, float, str, list, dict, None. 
Returns: The gathered state dictionary.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_state_dict_utils.py", + "ast_data": "FunctionDef name:_offload_state_dict_to_cpu arg:state_dict arguments arg arg arg Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "ImageSequential", + "source_code": "class ImageSequential(Sequential, ImageModuleMixIn, ONNXExportMixin):\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self._disable_features: bool = False\n\n @property\n def disable_features(self) -> bool:\n return self._disable_features\n\n @disable_features.setter\n def disable_features(self, value: bool=True) -> None:\n self._disable_features = value\n\n def __call__(self, *inputs: Any, input_names_to_handle: Optional[list[Any]]=None, output_type: str='tensor', **kwargs: Any) -> Any:\n if not self._disable_features:\n decorated_forward = self.convert_input_output(input_names_to_handle=input_names_to_handle, output_type=output_type)(super().__call__)\n _output_image = decorated_forward(*inputs, **kwargs)\n if output_type == 'tensor':\n self._output_image = self._detach_tensor_to_cpu(_output_image)\n else:\n self._output_image = _output_image\n else:\n _output_image = super().__call__(*inputs, **kwargs)\n return _output_image", + "docstring": "Handles image-based operations as a sequential module. This modules accepts multiple input and output data types, provides end-to-end visualization, file saving features. Note that this module fits the classes that return one image tensor only. Note: The additional add-on features increase the use of memories. To restore the original behaviour, you may set .", + "type": "class", + "file_path": "kornia\\kornia\\core\\module.py", + "ast_data": "ClassDef name:ImageSequential FunctionDef name:__init__ arg:self arguments arg arg arg Call Call FunctionDef name:disable_features arg:self arguments arg Return return:yes FunctionDef name:disable_features arg:self arg:value arguments arg arg Assign FunctionDef name:__call__ arg:self arguments arg arg arg arg arg If Assign Call Call Call Assign Call If Compare Assign Call Assign Assign Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "items", + "source_code": "def items(self):\n return Mapping.items(self)", + "docstring": "D.items() returns a set-like object providing a view on the items", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_npyio_impl.py", + "ast_data": "FunctionDef name:items arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "scale_fn", + "source_code": "def scale_fn(self, x) -> float:\n if self._scale_fn_custom is not None:\n return self._scale_fn_custom(x)\n else:\n return self._scale_fn_ref(x)", + "docstring": "Get the scaling policy.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "FunctionDef name:scale_fn arg:self arg:x arguments arg arg If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_TPUInferenceContext", + "source_code": "class _TPUInferenceContext(control_flow_ops.XLAControlFlowContext):\n\n def __init__(self, name: Text, check_ops: bool=True):\n super(_TPUInferenceContext, self).__init__()\n self._name = name\n self._check_ops = check_ops\n\n def AddOp(self, op):\n self._AddOpInternal(op)\n\n def _AddOpInternal(self, op):\n if self._check_ops and op.type in _DENYLISTED_INFERENCE_OPS:\n raise NotImplementedError(f'Operation of type 
{op.type} ({op.name}) is not supported on the TPU for inference. Execution will fail if this op is used in the graph. Make sure your variables are using variable_scope.')\n if self._outer_context:\n self._outer_context.AddInnerOp(op)\n\n def AddValue(self, val):\n result = val\n if self._outer_context:\n result = self._outer_context.AddValue(val)\n return result\n\n def AddInnerOp(self, op):\n self._AddOpInternal(op)\n\n @property\n def grad_state(self):\n return None", + "docstring": "A for nodes inside a TPU inference computation. The primary role of is to indicate the mode of operation and possibly sanity check operators inside a tpu.rewrite_for_inference() computation.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py", + "ast_data": "ClassDef name:_TPUInferenceContext FunctionDef name:__init__ arg:self arg:name arg:check_ops arguments arg arg arg Call Call Assign Assign FunctionDef name:AddOp arg:self arg:op arguments arg arg Call FunctionDef name:_AddOpInternal arg:self arg:op arguments arg arg If BoolOp Compare Raise Call If Call FunctionDef name:AddValue arg:self arg:val arguments arg arg Assign If Assign Call Return return:yes FunctionDef name:AddInnerOp arg:self arg:op arguments arg arg Call FunctionDef name:grad_state arg:self arguments arg Return return:no" + }, + { + "library": "pandas", + "name": "itemsize", + "source_code": "@cache_readonly\ndef itemsize(self) -> int:\n return self.numpy_dtype.itemsize", + "docstring": "Return the number of bytes in this dtype", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", + "ast_data": "FunctionDef name:itemsize arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_facecolor", + "source_code": "def get_facecolor(self):\n return self.patch.get_facecolor()", + "docstring": "Get the facecolor of the Axes.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:get_facecolor arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "rank_internal", + "source_code": "def rank_internal(input, name=None, optimize=True):\n with ops.name_scope(name, 'Rank', [input]) as name:\n if isinstance(input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):\n return gen_array_ops.size(input.dense_shape, name=name)\n else:\n input = ops.convert_to_tensor(input)\n input_shape = input.get_shape()\n if optimize and input_shape.ndims is not None:\n return constant(input_shape.ndims, dtypes.int32, name=name)\n return gen_array_ops.rank(input, name=name)", + "docstring": "Returns the rank of a tensor. Args: input: A or . name: A name for the operation (optional). optimize: if true, encode the rank as a constant when possible. 
Returns: A of type .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:rank_internal arg:input arg:name arg:optimize arguments arg arg arg With Call If Call Return return:yes Call Assign Call Assign Call If BoolOp Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "shear_x", + "source_code": "def shear_x(probability: float, magnitude: int) -> OperationBase:\n magnitudes = linspace(-0.3, 0.3, 11) * 180.0\n return ShearX(None, probability, magnitude_range=(magnitudes[magnitude].item(), magnitudes[magnitude + 1].item()), symmetric_megnitude=False)", + "docstring": "Return ShearX op.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\auto\\autoaugment\\ops.py", + "ast_data": "FunctionDef name:shear_x arg:probability arg:magnitude arguments arg arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "arrays", + "source_code": "@property\ndef arrays(self) -> list[ArrayLike]:\n return [blk.values for blk in self.blocks]", + "docstring": "Quick access to the backing arrays of the Blocks. Only for compatibility with ArrayManager for testing convenience. Not to be used in actual code, and return value is not the same as the ArrayManager method (list of 1D arrays vs iterator of 2D ndarrays / 1D EAs). Warning! The returned arrays don't handle Copy-on-Write, so this should be used with caution (only in read-mode).", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:arrays arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_sparsity_modes", + "source_code": "def get_sparsity_modes(model_object):\n if not model_object or not model_object.metadata:\n return []\n result = set()\n for subgraph in model_object.subgraphs:\n for tensor in subgraph.tensors:\n if not tensor.sparsity:\n continue\n if tensor.sparsity.blockMap.size == 0 or not tensor.sparsity.blockMap:\n result.add(conversion_metadata_fb.ModelOptimizationMode.RANDOM_SPARSITY)\n else:\n result.add(conversion_metadata_fb.ModelOptimizationMode.BLOCK_SPARSITY)\n return list(result)", + "docstring": "Get sparsity modes used in a tflite model. The sparsity modes are listed in conversion_metadata.fbs file. Args: model_object: A tflite model in object form. 
Returns: The list of sparsity modes used in the model.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py", + "ast_data": "FunctionDef name:get_sparsity_modes arg:model_object arguments arg If BoolOp Return return:no Assign Call For For If If BoolOp Compare Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_batchable_flat_tensor_specs", + "source_code": "def get_batchable_flat_tensor_specs(spec, context_spec=None):\n if isinstance(spec, internal.TensorSpec):\n return [spec]\n elif hasattr(spec, '__batch_encoder__'):\n encoding_specs = nest.map_structure(functools.partial(get_batchable_flat_tensor_specs, context_spec=context_spec), spec.__batch_encoder__.encoding_specs(spec))\n return nest.flatten(encoding_specs)\n else:\n warnings.warn(f'Batchable type {context_spec} contains non-batchable field or component with type {spec}.')\n return spec._flat_tensor_specs", + "docstring": "Returns the flat tensor specs for .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", + "ast_data": "FunctionDef name:get_batchable_flat_tensor_specs arg:spec arg:context_spec arguments arg arg If Call Return return:yes If Call Assign Call Call Call Return return:yes Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_layer_sqnr_dict", + "source_code": "def get_layer_sqnr_dict(model_a: nn.Module, model_b: nn.Module, x: torch.Tensor) -> dict[str, float]:\n import torch.ao.ns._numeric_suite_fx as ns\n from torch.ao.ns.fx.mappings import get_unmatchable_types_map\n unmatchable_types_map = get_unmatchable_types_map()\n unmatchable_types_map['funs_unmatchable'].add(torch.mul)\n model_a_ns, model_b_ns = ns.add_loggers('fp32', model_a, 'int8', model_b, ns.OutputLogger, unmatchable_types_map=unmatchable_types_map)\n model_a_ns(x)\n model_b_ns(x)\n activation_comparison_dict = ns.extract_logger_info(model_a_ns, model_b_ns, ns.OutputLogger, 'int8')\n ns.extend_logger_results_with_comparison(activation_comparison_dict, 'fp32', 'int8', torch.ao.ns.fx.utils.compute_sqnr, 'sqnr')\n layer_sqnr_dict = {}\n for key in activation_comparison_dict:\n layer = activation_comparison_dict[key]['node_output']['int8'][0]['fqn']\n sqnr = activation_comparison_dict[key]['node_output']['int8'][0]['sqnr'][0]\n layer_sqnr_dict[layer] = sqnr\n return layer_sqnr_dict", + "docstring": "Runs the Numeric Suite on model_a and model_b and returns a dictionary containing the SQNR between layers in model_a and model_b. Note: In order to support equalized models, this function has a hacky fix in which we do not match any torch.mul operators. This is because equalized models contain extra mul operators to scale the input by the equalization scale, but this edge case has not been resolved yet within the numeric suite code. Args: model_a: A float model model_b: A quantized model x: Inputs to use during calibration", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", + "ast_data": "FunctionDef name:get_layer_sqnr_dict arg:model_a arg:model_b arg:x arguments arg arg arg Assign Call Call Assign Call Call Call Assign Call Call Assign For Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "delete", + "source_code": "def delete(self):\n if not self._auto_gc_enabled:\n raise TypeError('Persistent tensor %s may have already been deleted.' 
% self.handle)\n self._auto_gc_enabled = False\n holder, deleter = _get_handle_deleter(self._session.graph, 0, self._handle)\n self._session.run(deleter, feed_dict={holder: self.handle})", + "docstring": "Force the deletion of this persistent tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py", + "ast_data": "FunctionDef name:delete arg:self arguments arg If Raise Call Assign Assign Call Call" + }, + { + "library": "pytorch", + "name": "convert_to_pool_lines", + "source_code": "def convert_to_pool_lines(self, lines):\n name_to_group = self.compute_buffer_groups(lines)\n for i, line in enumerate(lines):\n if isinstance(line, AllocateLine):\n if line.node.get_name() in name_to_group:\n lines[i] = AllocFromPoolLine(self.wrapper, name_to_group[line.node.get_name()])\n elif isinstance(line, FreeIfNotReusedLine):\n assert not line.is_reused\n if line.node.get_name() in name_to_group:\n lines[i] = DeallocFromPoolLine(self.wrapper, name_to_group[line.node.get_name()])\n elif isinstance(line, ReuseLine):\n if line.node.get_name() in name_to_group:\n line.delete_old = False", + "docstring": "Convert AllocateLine/FreeIfNotReusedLine/ReuseLine into their pool-based counterparts.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py", + "ast_data": "FunctionDef name:convert_to_pool_lines arg:self arg:lines arguments arg arg Assign Call For Call If Call If Compare Call Assign Call Call If Call If Compare Call Assign Call Call If Call If Compare Call Assign" + }, + { + "library": "kornia", + "name": "_create_session", + "source_code": "def _create_session(self, op: onnx.ModelProto, providers: Optional[list[str]]=None, session_options: Optional[ort.InferenceSession]=None) -> ort.InferenceSession:\n if session_options is None:\n sess_options = ort.SessionOptions()\n sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED\n session = ort.InferenceSession(op.SerializeToString(), sess_options=sess_options, providers=providers or ['CPUExecutionProvider'])\n return session", + "docstring": "Create an optimized ONNXRuntime InferenceSession for the combined model. Args: op: Onnx operation. providers: Execution providers for ONNXRuntime (e.g., ['CUDAExecutionProvider', 'CPUExecutionProvider']). session_options: Optional ONNXRuntime session options for session configuration and optimizations. 
Returns: ort.InferenceSession: The ONNXRuntime session optimized for inference.", + "type": "method", + "file_path": "kornia\\kornia\\core\\mixin\\onnx.py", + "ast_data": "FunctionDef name:_create_session arg:self arg:op arg:providers arg:session_options arguments arg arg arg arg If Compare Assign Call Assign Assign Call Call BoolOp Return return:yes" + }, + { + "library": "cherrypy", + "name": "Cache", + "source_code": "class Cache(object):\n\n def get(self):\n raise NotImplementedError\n\n def put(self, obj, size):\n raise NotImplementedError\n\n def delete(self):\n raise NotImplementedError\n\n def clear(self):\n raise NotImplementedError", + "docstring": "Base class for Cache implementations.", + "type": "class", + "file_path": "cherrypy\\cherrypy\\lib\\caching.py", + "ast_data": "ClassDef name:Cache FunctionDef name:get arg:self arguments arg Raise FunctionDef name:put arg:self arg:obj arg:size arguments arg arg arg Raise FunctionDef name:delete arg:self arguments arg Raise FunctionDef name:clear arg:self arguments arg Raise" + }, + { + "library": "pytorch", + "name": "skip", + "source_code": "def skip(fn=None):\n if fn is None:\n return skip\n fn = innermost_fn(fn)\n assert callable(fn)\n skip_code(fn.__code__)\n fn._torchdynamo_disable = True\n return fn", + "docstring": "Skip frames associated with the function code, but still process recursively invoked frames", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\decorators.py", + "ast_data": "FunctionDef name:skip arg:fn arguments arg If Compare Return return:yes Assign Call Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "sample_n", + "source_code": "@deprecated('`sample_n(n)` will be deprecated. Use `sample((n,))` instead.', category=FutureWarning)\ndef sample_n(self, n: int) -> Tensor:\n return self.sample(torch.Size((n,)))", + "docstring": "Generates n samples or n batches of samples if the distribution parameters are batched.", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\distribution.py", + "ast_data": "FunctionDef name:sample_n arg:self arg:n arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "numpy", + "source_code": "def numpy(self) -> npt.ArrayLike:\n maybe_arr = self._numpy()\n return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr", + "docstring": "Copy of the contents of this Tensor into a NumPy array or scalar. Unlike NumPy arrays, Tensors are immutable, so this method has to copy the contents to ensure safety. Use to get a readonly view of the contents without doing a copy: >>> t = tf.constant([42]) >>> np.asarray(memoryview(t)) array([42], dtype=int32) Note that is only zero-copy for Tensors on CPU. If a Tensor is on GPU, it will have to be transferred to CPU first in order for to work. Returns: A NumPy array of the same shape and dtype or a NumPy scalar, if this Tensor has rank 0. Raises: ValueError: If the dtype of this Tensor does not have a compatible NumPy dtype.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:numpy arg:self arguments arg Assign Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "ndim", + "source_code": "@property\ndef ndim(self) -> int:\n return 1", + "docstring": "Number of dimensions of the underlying data, by definition 1. See Also -------- Series.size: Return the number of elements in the underlying data. 
Series.shape: Return a tuple of the shape of the underlying data. Series.dtype: Return the dtype object of the underlying data. Series.values: Return Series as ndarray or ndarray-like depending on the dtype. Examples -------- >>> s = pd.Series([\"Ant\", \"Bear\", \"Cow\"]) >>> s 0 Ant 1 Bear 2 Cow dtype: object >>> s.ndim 1 For Index: >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.ndim 1", + "type": "method", + "file_path": "pandas\\pandas\\core\\base.py", + "ast_data": "FunctionDef name:ndim arg:self arguments arg Return return:yes" + }, + { + "library": "scrapy", + "name": "copy", + "source_code": "def copy(self) -> Self:\n return self.replace()", + "docstring": "Return a copy of this Response", + "type": "method", + "file_path": "scrapy\\scrapy\\http\\response\\__init__.py", + "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "visit", + "source_code": "def visit(unused_path, unused_parent, children):\n for child in children:\n _, attr = tf_decorator.unwrap(child[1])\n api_names_v2 = tf_export.get_v2_names(attr)\n for name in api_names_v2:\n v2_names.add(name)", + "docstring": "Visitor that collects TF 2.0 names.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\update\\generate_v2_renames_map.py", + "ast_data": "FunctionDef name:visit arg:unused_path arg:unused_parent arg:children arguments arg arg arg For Assign Call Assign Call For Call" + }, + { + "library": "numpy", + "name": "PathScaleCCompiler", + "source_code": "class PathScaleCCompiler(UnixCCompiler):\n compiler_type = 'pathcc'\n cc_exe = 'pathcc'\n cxx_exe = 'pathCC'\n\n def __init__(self, verbose=0, dry_run=0, force=0):\n UnixCCompiler.__init__(self, verbose, dry_run, force)\n cc_compiler = self.cc_exe\n cxx_compiler = self.cxx_exe\n self.set_executables(compiler=cc_compiler, compiler_so=cc_compiler, compiler_cxx=cxx_compiler, linker_exe=cc_compiler, linker_so=cc_compiler + ' -shared')", + "docstring": "PathScale compiler compatible with an gcc built Python.", + "type": "class", + "file_path": "numpy\\numpy\\distutils\\pathccompiler.py", + "ast_data": "ClassDef name:PathScaleCCompiler Assign Assign Assign FunctionDef name:__init__ arg:self arg:verbose arg:dry_run arg:force arguments arg arg arg arg Call Assign Assign Call" + }, + { + "library": "pytorch", + "name": "MixedPrecision", + "source_code": "@dataclass\nclass MixedPrecision:\n param_dtype: Optional[torch.dtype] = None\n reduce_dtype: Optional[torch.dtype] = None\n buffer_dtype: Optional[torch.dtype] = None\n keep_low_precision_grads: bool = False\n cast_forward_inputs: bool = False\n cast_root_forward_inputs: bool = True\n _module_classes_to_ignore: Sequence[type[torch.nn.Module]] = (_BatchNorm,)", + "docstring": "This configures FSDP-native mixed precision training. Attributes: param_dtype (Optional[torch.dtype]): This specifies the dtype for model parameters during forward and backward and thus the dtype for forward and backward computation. Outside forward and backward, the *sharded* parameters are kept in full precision (e.g. for the optimizer step), and for model checkpointing, the parameters are always saved in full precision. 
(Default: `` ones.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\fsdp\\api.py", + "ast_data": "ClassDef name:MixedPrecision" + }, + { + "library": "scikit-learn", + "name": "row_norms", + "source_code": "def row_norms(X, squared=False):\n if sparse.issparse(X):\n X = X.tocsr()\n norms = csr_row_norms(X)\n if not squared:\n norms = np.sqrt(norms)\n else:\n xp, _ = get_namespace(X)\n if _is_numpy_namespace(xp):\n X = np.asarray(X)\n norms = np.einsum('ij,ij->i', X, X)\n norms = xp.asarray(norms)\n else:\n norms = xp.sum(xp.multiply(X, X), axis=1)\n if not squared:\n norms = xp.sqrt(norms)\n return norms", + "docstring": "Row-wise (squared) Euclidean norm of X. Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse matrices and does not create an X.shape-sized temporary. Performs no input validation. Parameters ---------- X : array-like The input array. squared : bool, default=False If True, return squared norms. Returns ------- array-like The row-wise (squared) Euclidean norm of X.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\extmath.py", + "ast_data": "FunctionDef name:row_norms arg:X arg:squared arguments arg arg If Call Assign Call Assign Call If Assign Call Assign Call If Call Assign Call Assign Call Assign Call Assign Call Call If Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "group_norm", + "source_code": "def group_norm(input: Tensor, num_groups: int, weight: Optional[Tensor]=None, bias: Optional[Tensor]=None, eps: float=1e-05) -> Tensor:\n torch._check(input.ndim >= 2, lambda: f'Expected at least 2 dimensions for input tensor but received {input.ndim}')\n batch_size = input.shape[0]\n num_channels = input.shape[1]\n torch._check(num_channels % num_groups == 0, lambda: 'Expected number of channels in input to be divisible by num_groups, ' + f'but got input of shape {input.shape} and num_groups = {num_groups}')\n flattened_inner_size = 1\n for dim_length in input.shape[2:]:\n flattened_inner_size *= dim_length\n return torch.native_group_norm(input, weight, bias, batch_size, num_channels, flattened_inner_size, num_groups, eps)[0]", + "docstring": "Reference implementation of :func:.", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py", + "ast_data": "FunctionDef name:group_norm arg:input arg:num_groups arg:weight arg:bias arg:eps arguments arg arg arg arg arg Call Compare arguments Assign Assign Call Compare arguments Assign For Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "rotate_deg", + "source_code": "def rotate_deg(self, degrees):\n return self.rotate(math.radians(degrees))", + "docstring": "Add a rotation (in degrees) to this transform in place. 
Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`.", "type": "method", "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", "ast_data": "FunctionDef name:rotate_deg arg:self arg:degrees arguments arg arg Return return:yes Call Call" }, { "library": "tensorflow", "name": "deref", "source_code": "def deref(self):\n return self._wrapped", "docstring": "Returns the referenced object.", "type": "method", "file_path": "tensorflow\\tensorflow\\python\\util\\object_identity.py", "ast_data": "FunctionDef name:deref arg:self arguments arg Return return:yes" }, { "library": "tensorflow", "name": "in_load_context", "source_code": "def in_load_context():\n return _load_context.in_load_context()", "docstring": "Returns whether under a load context.", "type": "function", "file_path": "tensorflow\\tensorflow\\python\\distribute\\load_context.py", "ast_data": "FunctionDef name:in_load_context arguments Return return:yes Call" }, { "library": "kornia", "name": "__init__", "source_code": "def __init__(self, num_dims: int, num_units: int=2, num_unit_layers: int=4, num_hidden: int=128) -> None:\n super().__init__()\n self._num_unit_layers = num_unit_layers\n layers = []\n for i in range(num_units):\n num_unit_inp_dims = num_dims if i == 0 else num_hidden + num_dims\n for j in range(num_unit_layers):\n num_layer_inp_dims = num_unit_inp_dims if j == 0 else num_hidden\n layer = nn.Linear(num_layer_inp_dims, num_hidden)\n layers.append(nn.Sequential(layer, nn.ReLU()))\n self._mlp = nn.ModuleList(layers)", "docstring": "Construct MLP. Args: num_dims: Number of input dimensions (channels). num_units: Number of sub-units. num_unit_layers: Number of fully connected layers in each sub-unit.
num_hidden: Layer hidden dimensions.", + "type": "method", + "file_path": "kornia\\kornia\\nerf\\nerf_model.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:num_dims arg:num_units arg:num_unit_layers arg:num_hidden arguments arg arg arg arg arg Call Call Assign Assign For Call Assign Compare For Call Assign Compare Assign Call Call Call Call Assign Call" + }, + { + "library": "matplotlib", + "name": "_mpl_coords", + "source_code": "def _mpl_coords(self, pos=None):\n if pos is None:\n pos = wx.GetMouseState()\n x, y = self.ScreenToClient(pos.X, pos.Y)\n else:\n x, y = (pos.X, pos.Y)\n if not wx.Platform == '__WXMSW__':\n scale = self.GetDPIScaleFactor()\n return (x * scale, self.figure.bbox.height - y * scale)\n else:\n return (x, self.figure.bbox.height - y)", + "docstring": "Convert a wx position, defaulting to the current cursor position, to Matplotlib coordinates.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py", + "ast_data": "FunctionDef name:_mpl_coords arg:self arg:pos arguments arg arg If Compare Assign Call Assign Call Assign If Compare Assign Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "draw", + "source_code": "def draw(self):\n if self._is_drawing:\n return\n with cbook._setattr_cm(self, _is_drawing=True):\n super().draw()\n self.update()", + "docstring": "Render the figure, and queue a request for a Qt draw.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_qt.py", + "ast_data": "FunctionDef name:draw arg:self arguments arg If Return return:no With Call Call Call Call" + }, + { + "library": "pytorch", + "name": "run_decompositions", + "source_code": "@_disable_prexisiting_fake_mode\ndef run_decompositions(self, decomp_table: Optional[dict[torch._ops.OperatorBase, Callable]]=None, decompose_custom_triton_ops: bool=False) -> 'ExportedProgram':\n _decomp_table = default_decompositions() if decomp_table is None else dict(decomp_table)\n if isinstance(_decomp_table, CustomDecompTable):\n _decomp_table = _decomp_table.materialize()\n cia_to_decomp, python_decomp_table = _split_decomp_table_to_cia_and_python_decomp(_decomp_table)\n return _decompose_exported_program(self, cia_to_decomp=cia_to_decomp, python_decomp_table=python_decomp_table, joint_loss_index=None, decompose_custom_triton_ops=decompose_custom_triton_ops)", + "docstring": "Run a set of decompositions on the exported program and returns a new exported program. By default we will run the Core ATen decompositions to get operators in the _. For now, we do not decompose joint graphs. Args: decomp_table: An optional argument that specifies decomp behaviour for Aten ops (1) If None, we decompose to core aten decompositions (2) If empty, we don't decompose any operator Some examples: If you don't want to decompose anything .. code-block:: python ep = torch.export.export(model, ...) ep = ep.run_decompositions(decomp_table={}) If you want to get a core aten operator set except for certain operator, you can do following: .. code-block:: python ep = torch.export.export(model, ...) 
decomp_table = torch.export.default_decompositions() decomp_table[your_op] = your_custom_decomp ep = ep.run_decompositions(decomp_table=decomp_table)", + "type": "method", + "file_path": "pytorch\\torch\\export\\exported_program.py", + "ast_data": "FunctionDef name:run_decompositions arg:self arg:decomp_table arg:decompose_custom_triton_ops arguments arg arg arg Assign Compare Call Call If Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_do_batch_all_reduce_sparse", + "source_code": "def _do_batch_all_reduce_sparse(self, reduce_op, sparse_values):\n logging.log_first_n(logging.WARN, 'Efficient allreduce is not supported for %d IndexedSlices' % len(sparse_values), 10)\n return self._simple_cross_replica_ops.batch_reduce(reduce_op, zip(sparse_values, sparse_values))", + "docstring": "Run batch all-reduce for sparse values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py", + "ast_data": "FunctionDef name:_do_batch_all_reduce_sparse arg:self arg:reduce_op arg:sparse_values arguments arg arg arg Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_get_group_tag", + "source_code": "def _get_group_tag(pg: ProcessGroup) -> str:\n tag = _world.pg_to_tag[pg]\n tag = tag.removeprefix('user:')\n return tag", + "docstring": "Return the tag associated with ``.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:_get_group_tag arg:pg arguments arg Assign Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "default_fill_value", + "source_code": "def default_fill_value(obj):\n\n def _scalar_fill_value(dtype):\n if dtype.kind in 'Mm':\n return default_filler.get(dtype.str[1:], '?')\n else:\n return default_filler.get(dtype.kind, '?')\n dtype = _get_dtype_of(obj)\n return _recursive_fill_value(dtype, _scalar_fill_value)", + "docstring": "Return the default fill value for the argument object. The default filling value depends on the datatype of the input array or the type of the input scalar: ======== ======== datatype default ======== ======== bool True int 999999 float 1.e20 complex 1.e20+0j object '?' string 'N/A' ======== ======== For structured types, a structured scalar is returned, with each field the default fill value for its type. For subarray types, the fill value is an array of the same size containing the default scalar fill value. Parameters ---------- obj : ndarray, dtype or scalar The array data-type or scalar for which the default fill value is returned. Returns ------- fill_value : scalar The default fill value. 
Examples -------- >>> import numpy as np >>> np.ma.default_fill_value(1) 999999 >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) 1e+20 >>> np.ma.default_fill_value(np.dtype(complex)) (1e+20+0j)", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:default_fill_value arg:obj arguments arg FunctionDef name:_scalar_fill_value arg:dtype arguments arg If Compare Return return:yes Call Return return:yes Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "p_range", + "source_code": "def p_range(self, n: int, p0: int | None=None, p1: int | None=None) -> tuple[int, int]:\n p_max = self.p_max(n)\n p0_ = self.p_min if p0 is None else p0\n p1_ = p_max if p1 is None else p1\n if not self.p_min <= p0_ < p1_ <= p_max:\n raise ValueError(f'Invalid Parameter p0={p0!r}, p1={p1!r}, i.e., ' + f'self.p_min={self.p_min!r} <= p0 < p1 <= p_max={p_max!r} ' + f'does not hold for signal length n={n!r}!')\n return (p0_, p1_)", + "docstring": "Determine and validate slice index range. Parameters ---------- n : int Number of samples of input signal, assuming t[0] = 0. p0 : int | None First slice index. If 0 then the first slice is centered at t = 0. If `p_minp_max(n)p_maxp_min`. upper_border_begin: Where post-padding effects start. ShortTimeFFT: Class this property belongs to.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_short_time_fft.py", + "ast_data": "FunctionDef name:p_range arg:self arg:n arg:p0 arg:p1 arguments arg arg arg arg Assign Call Assign Compare Assign Compare If Compare Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "DeadlineExceededError", + "source_code": "@tf_export('errors.DeadlineExceededError')\nclass DeadlineExceededError(OpError):\n\n def __init__(self, node_def, op, message, *args):\n super(DeadlineExceededError, self).__init__(node_def, op, message, DEADLINE_EXCEEDED, *args)", + "docstring": "Raised when a deadline expires before an operation could complete. This exception is not currently used.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", + "ast_data": "ClassDef name:DeadlineExceededError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "_session_run_lock", + "source_code": "def _session_run_lock(self) -> lock_util.GroupLock._Context:\n return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)", + "docstring": "Returns a lock to guard code for Session.run. See the comment for self._group_lock for more info.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_session_run_lock arg:self arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, desc1: Tensor, desc2: Tensor, lafs1: Tensor, lafs2: Tensor) -> Tuple[Tensor, Tensor]:\n if self.match_mode == 'fginn':\n params = _get_default_fginn_params()\n params.update(self.params)\n out = match_fginn(desc1, desc2, lafs1, lafs2, params['th'], params['spatial_th'], params['mutual'])\n elif self.match_mode == 'adalam':\n _params = get_adalam_default_config()\n _params.update(self.params)\n out = match_adalam(desc1, desc2, lafs1, lafs2, config=_params)\n else:\n raise NotImplementedError\n return out", + "docstring": "Run forward. Args: desc1: Batch of descriptors of a shape :math:. desc2: Batch of descriptors of a shape :math:. 
lafs1: LAFs of a shape :math:. lafs2: LAFs of a shape :math:. Returns: - Descriptor distance of matching descriptors, shape of :math:. - Long tensor indexes of matching descriptors in desc1 and desc2, shape of :math: where :math:.", + "type": "method", + "file_path": "kornia\\kornia\\feature\\matching.py", + "ast_data": "FunctionDef name:forward arg:self arg:desc1 arg:desc2 arg:lafs1 arg:lafs2 arguments arg arg arg arg arg If Compare Assign Call Call Assign Call If Compare Assign Call Call Assign Call Raise Return return:yes" + }, + { + "library": "sphinx", + "name": "FiletypeNotFoundError", + "source_code": "class FiletypeNotFoundError(Exception):\n pass", + "docstring": "Raised by get_filetype() if a filename matches no source suffix.", + "type": "class", + "file_path": "sphinx\\sphinx\\errors.py", + "ast_data": "ClassDef name:FiletypeNotFoundError" + }, + { + "library": "tensorflow", + "name": "on_epoch_end", + "source_code": "def on_epoch_end(self, epoch, logs=None):\n self._log_epoch_metrics(epoch, logs)\n if self.histogram_freq and epoch % self.histogram_freq == 0:\n self._log_weights(epoch)\n if self.embeddings_freq and epoch % self.embeddings_freq == 0:\n self._log_embeddings(epoch)", + "docstring": "Runs metrics and histogram summaries at epoch end.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:on_epoch_end arg:self arg:epoch arg:logs arguments arg arg arg Call If BoolOp Compare Call If BoolOp Compare Call" + }, + { + "library": "tensorflow", + "name": "true_negatives", + "source_code": "@tf_export(v1=['metrics.true_negatives'])\ndef true_negatives(labels, predictions, weights=None, metrics_collections=None, updates_collections=None, name=None):\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.true_negatives is not supported when eager execution is enabled.')\n with variable_scope.variable_scope(name, 'true_negatives', (predictions, labels, weights)):\n predictions, labels, weights = _remove_squeezable_dimensions(predictions=math_ops.cast(predictions, dtype=dtypes.bool), labels=math_ops.cast(labels, dtype=dtypes.bool), weights=weights)\n is_true_negative = math_ops.logical_and(math_ops.equal(labels, False), math_ops.equal(predictions, False))\n return _count_condition(is_true_negative, weights, metrics_collections, updates_collections)", + "docstring": "Sum the weights of true_negatives. If is , weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a whose dimensions must match . Will be cast to . predictions: The predicted values, a of arbitrary dimensions. Will be cast to . weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If and have mismatched shapes, or if is not and its shape doesn't match , or if either or are not a list or tuple. 
RuntimeError: If eager execution is enabled.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", + "ast_data": "FunctionDef name:true_negatives arg:labels arg:predictions arg:weights arg:metrics_collections arg:updates_collections arg:name arguments arg arg arg arg arg arg If Call Raise Call With Call Assign Call Call Call Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "random", + "source_code": "@classmethod\ndef random(cls, batch_size: Optional[int]=None, device: Optional[Device]=None, dtype: Dtype=None) -> Se2:\n r = So2.random(batch_size, device, dtype)\n shape: tuple[int, ...]\n if batch_size is None:\n shape = (2,)\n else:\n KORNIA_CHECK(batch_size >= 1, msg='batch_size must be positive')\n shape = (batch_size, 2)\n return cls(r, Vector2(rand(shape, device=device, dtype=dtype)))", + "docstring": "Create a Se2 group representing a random transformation. Args: batch_size: the batch size of the underlying data. device: device to place the result on. dtype: dtype of the result. Example: >>> s = Se2.random() >>> s = Se2.random(batch_size=3)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py", + "ast_data": "FunctionDef name:random arg:cls arg:batch_size arg:device arg:dtype arguments arg arg arg arg Assign Call If Compare Assign Call Compare Assign Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "get_unanimous_names", + "source_code": "def get_unanimous_names(*indexes: Index) -> tuple[Hashable, ...]:\n name_tups = (tuple(i.names) for i in indexes)\n name_sets = ({*ns} for ns in zip_longest(*name_tups))\n names = tuple((ns.pop() if len(ns) == 1 else None for ns in name_sets))\n return names", + "docstring": "Return common name if all indices agree, otherwise None (level-by-level). Parameters ---------- indexes : list of Index objects Returns ------- list A list representing the unanimous 'names' found.", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:get_unanimous_names arguments arg Assign Call Assign Call Assign Call Compare Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_num_fig_managers", + "source_code": "@classmethod\ndef get_num_fig_managers(cls):\n return len(cls.figs)", + "docstring": "Return the number of figures being managed.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_pylab_helpers.py", + "ast_data": "FunctionDef name:get_num_fig_managers arg:cls arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_show_ops_in_metagraph", + "source_code": "def _show_ops_in_metagraph(saved_model_dir, tag_set):\n meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)\n _show_ops_in_metagraph_mgd(meta_graph_def)", + "docstring": "Prints the ops in the MetaGraph. Prints all the ops used in the MetaGraphDef indicated by the tag_set stored in SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect. tag_set: Group of tag(s) of the MetaGraphDef in string format, separated by ','. 
For tag-set contains multiple tags, all tags must be passed in.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py", + "ast_data": "FunctionDef name:_show_ops_in_metagraph arg:saved_model_dir arg:tag_set arguments arg arg Assign Call Call" + }, + { + "library": "tensorflow", + "name": "acquire", + "source_code": "def acquire(self, group_id):\n self._validate_group_id(group_id)\n self._ready.acquire()\n while self._another_group_active(group_id):\n self._ready.wait()\n self._group_member_counts[group_id] += 1\n self._ready.release()", + "docstring": "Acquire the group lock for a specific group .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\util\\lock_util.py", + "ast_data": "FunctionDef name:acquire arg:self arg:group_id arguments arg arg Call Call While Call Call Call" + }, + { + "library": "matplotlib", + "name": "_update_offset_text_position", + "source_code": "def _update_offset_text_position(self, bboxes, bboxes2):\n x, _ = self.offsetText.get_position()\n if 'outline' in self.axes.spines:\n bbox = self.axes.spines['outline'].get_window_extent()\n else:\n bbox = self.axes.bbox\n top = bbox.ymax\n self.offsetText.set_position((x, top + self.OFFSETTEXTPAD * self.get_figure(root=True).dpi / 72))", + "docstring": "Update the offset_text position based on the sequence of bounding boxes of all the ticklabels", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:_update_offset_text_position arg:self arg:bboxes arg:bboxes2 arguments arg arg arg Assign Call If Compare Assign Call Assign Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_elementwise_where_v2", + "source_code": "def _elementwise_where_v2(condition, x, y):\n if not (condition.shape.is_fully_defined() and x.shape.is_fully_defined() and y.shape.is_fully_defined() and (x.shape == y.shape) and (condition.shape == x.shape)):\n shape_c = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(condition)\n shape_x = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(x)\n shape_y = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(y)\n shape = ragged_tensor_shape.broadcast_dynamic_shape(shape_c, ragged_tensor_shape.broadcast_dynamic_shape(shape_x, shape_y))\n condition = ragged_tensor_shape.broadcast_to(condition, shape)\n x = ragged_tensor_shape.broadcast_to(x, shape)\n y = ragged_tensor_shape.broadcast_to(y, shape)\n condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor)\n x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor)\n y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor)\n if not (condition_is_ragged or x_is_ragged or y_is_ragged):\n return array_ops.where_v2(condition, x, y)\n return ragged_functional_ops.map_flat_values(array_ops.where_v2, condition, x, y)", + "docstring": "Ragged version of tf.where_v2(condition, x, y).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_where_op.py", + "ast_data": "FunctionDef name:_elementwise_where_v2 arg:condition arg:x arg:y arguments arg arg arg If BoolOp Call Call Call Compare Compare Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If BoolOp Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "inner_optimizer", + "source_code": "@property\ndef inner_optimizer(self):\n return self._optimizer", + "docstring": "The optimizer that this LossScaleOptimizer is 
wrapping.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:inner_optimizer arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_IDCounter", + "source_code": "class _IDCounter:\n\n def __init__(self, prefix):\n self.prefix = prefix\n self.count = 0\n\n def get_id(self):\n self.count += 1\n return f'{self.prefix}-{self.count}'", + "docstring": "Generate sequential ids with a prefix.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\_repr_html\\estimator.py", + "ast_data": "ClassDef name:_IDCounter FunctionDef name:__init__ arg:self arg:prefix arguments arg arg Assign Assign FunctionDef name:get_id arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "loop_len_vector", + "source_code": "@property\ndef loop_len_vector(self):\n return self._loop_len_vector", + "docstring": "Returns a single element vector whose value is number of iterations.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:loop_len_vector arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "average_parameters", + "source_code": "def average_parameters(params: Iterator[torch.nn.Parameter], process_group: ProcessGroup):\n group_to_use = process_group if process_group is not None else group.WORLD\n if dist._rank_not_in_group(group_to_use):\n return\n params_it1, params_it2 = itertools.tee(params)\n flat_params = torch.cat([p.data.reshape(-1) for p in params_it1])\n flat_params /= dist.get_world_size(group_to_use)\n if torch.accelerator.is_available():\n torch.accelerator.synchronize()\n dist.all_reduce(flat_params, group=group_to_use)\n offset = 0\n for p in params_it2:\n p.data = flat_params[offset:offset + p.numel()].view_as(p).type_as(p)\n offset += p.numel()", + "docstring": "Averages all the given parameters. For allreduce efficiency, all the parameters are flattened into a contiguous buffer. 
Thus, it requires extra memory of the same size as the given parameters.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\algorithms\\model_averaging\\utils.py", + "ast_data": "FunctionDef name:average_parameters arg:params arg:process_group arguments arg arg Assign Compare If Call Return return:no Assign Call Assign Call Call Call If Call Call Call Assign For Assign Call Call Call Call" + }, + { + "library": "pytorch", + "name": "is_tensor_shardable", + "source_code": "def is_tensor_shardable(shape: Sequence[int], spec: DTensorSpec) -> bool:\n shards_map = [1] * len(shape)\n for i, placement in enumerate(spec.placements):\n if placement.is_shard():\n shard_dim = cast(Shard, placement).dim\n shards_map[shard_dim] *= spec.mesh.size(i)\n for i, dim_size in enumerate(shape):\n if shards_map[i] > 1 and dim_size < shards_map[i]:\n return False\n return True", + "docstring": "Check if the shape is shardable according to the spec.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\utils.py", + "ast_data": "FunctionDef name:is_tensor_shardable arg:shape arg:spec arguments arg arg Assign Call For Call If Call Assign Call Call For Call If BoolOp Compare Compare Return return:yes Return return:yes" + }, + { + "library": "kornia", + "name": "__mul__", + "source_code": "def __mul__(self, right: So3) -> So3:\n if isinstance(right, So3):\n return So3(self.q * right.q)\n elif isinstance(right, (Tensor, Vector3)):\n w = zeros(*right.shape[:-1], 1, device=right.device, dtype=right.dtype)\n quat = Quaternion(concatenate((w, right.data), -1))\n out = (self.q * quat * self.q.conj()).vec\n if isinstance(right, Tensor):\n return out\n elif isinstance(right, Vector3):\n return Vector3(out)\n else:\n raise TypeError(f'Not So3 or Tensor type. Got: {type(right)}')", + "docstring": "Compose two So3 transformations. Args: right: the other So3 transformation. Return: The resulting So3 transformation.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py", + "ast_data": "FunctionDef name:__mul__ arg:self arg:right arguments arg arg If Call Return return:yes Call If Call Assign Call Assign Call Call Assign Call If Call Return return:yes If Call Return return:yes Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "conv_flop", + "source_code": "@register_flop_formula([aten.convolution, aten._convolution])\ndef conv_flop(x_shape, w_shape, _bias, _stride, _padding, _dilation, transposed, *args, out_shape=None, **kwargs) -> int:\n return conv_flop_count(x_shape, w_shape, out_shape, transposed=transposed)", + "docstring": "Count flops for convolution.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\flop_counter.py", + "ast_data": "FunctionDef name:conv_flop arg:x_shape arg:w_shape arg:_bias arg:_stride arg:_padding arg:_dilation arg:transposed arguments arg arg arg arg arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "stop_rasterizing", + "source_code": "def stop_rasterizing(self):\n pass", + "docstring": "Switch back to the vector renderer and draw the contents of the raster renderer as an image on the vector renderer. 
Used by .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:stop_rasterizing arg:self arguments arg" + }, + { + "library": "kornia", + "name": "transform", + "source_code": "@classmethod\ndef transform(cls, input: Tensor, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> Tensor:\n if extra_args is None:\n extra_args = {}\n if isinstance(module, (K.GeometricAugmentationBase2D,)):\n input = module.transform_masks(input, params=cls.get_instance_module_param(param), flags=module.flags, transform=module.transform_matrix, **extra_args)\n elif isinstance(module, (K.GeometricAugmentationBase3D,)):\n raise NotImplementedError('The support for 3d mask operations are not yet supported. You are welcome to file a PR in our repo.')\n elif isinstance(module, K.RandomTransplantation):\n input = module(input, params=cls.get_instance_module_param(param), data_keys=[DataKey.MASK], **extra_args)\n elif isinstance(module, _AugmentationBase):\n input = module.transform_masks(input, params=cls.get_instance_module_param(param), flags=module.flags, **extra_args)\n elif isinstance(module, K.ImageSequential) and (not module.is_intensity_only()):\n input = module.transform_masks(input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n elif isinstance(module, K.container.ImageSequentialBase):\n input = module.transform_masks(input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n elif isinstance(module, (K.auto.operations.OperationBase,)):\n input = MaskSequentialOps.transform(input, module=module.op, param=param, extra_args=extra_args)\n return input", + "docstring": "Apply a transformation with respect to the parameters. Args: input: the input tensor. module: any torch Module but only kornia augmentation modules will count to apply transformations. param: the corresponding parameters to the module. extra_args: Optional dictionary of extra arguments with specific options for different input types.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\ops.py", + "ast_data": "FunctionDef name:transform arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg If Compare Assign If Call Assign Call Call If Call Raise Call If Call Assign Call Call If Call Assign Call Call If BoolOp Call Call Assign Call Call If Call Assign Call Call If Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_inter_op_parallelism_threads", + "source_code": "@tf_export('config.threading.set_inter_op_parallelism_threads')\ndef set_inter_op_parallelism_threads(num_threads):\n context.context().inter_op_parallelism_threads = num_threads", + "docstring": "Set number of threads used for parallelism between independent operations. Determines the number of threads used by independent non-blocking operations. 0 means the system picks an appropriate number. 
Args: num_threads: Number of parallel threads", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py", + "ast_data": "FunctionDef name:set_inter_op_parallelism_threads arg:num_threads arguments arg Assign Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, *input_types, **kwargs):\n self._input_types = input_types\n self._func_name = kwargs.pop('func_name', None)\n self._grad_func = kwargs.pop('grad_func', None)\n self._python_grad_func = kwargs.pop('python_grad_func', None)\n self._out_names = kwargs.pop('out_names', None)\n self._extra_kwargs = kwargs", + "docstring": "Create a decorator. Args: *input_types: A list of **kwargs: Optional keyword arguments, including func_name - (optional). A python string, the name to use to declare this in the graph. grad_func - (optional). A function implementing the gradient of the function-to-register. This is must be a object. The gradient function must satisfy the criterion defined in function.proto:GradientDef. python_grad_func - (optional). A function implementing the gradient of the function python-side. This function must take the current op and the gradients w.r.t. its outputs, and return the gradients w.r.t. the inputs. That is it must implement the interface expected by ). This will be called by tf.gradients to add the gradient ops to the graph. At most one of grad_func and python_grad_func can be specified. out_names = (optional). A list of strings, one per output tensor. shape_func - (optional). A function taking the op and returning a list of static shapes to set for the function's outputs.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg Assign Assign Call Assign Call Assign Call Assign Call Assign" + }, + { + "library": "scikit-learn", + "name": "predict_joint_log_proba", + "source_code": "def predict_joint_log_proba(self, X):\n check_is_fitted(self)\n X = self._check_X(X)\n return self._joint_log_likelihood(X)", + "docstring": "Return joint log probability estimates for the test vector X. For each row x of X and class y, the joint log probability is given by `classes_`.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\naive_bayes.py", + "ast_data": "FunctionDef name:predict_joint_log_proba arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_warn_overlap", + "source_code": "def _warn_overlap(self, message, kwargs):\n _kwargs = set() if self._kwargs is None else set(self._kwargs.keys())\n overlap = _kwargs.intersection(kwargs.keys())\n if overlap:\n warnings.warn(f'{message} Overlapping parameters are: {overlap}', UserWarning)", + "docstring": "Warn if there is any overlap between `` passed as metadata.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py", + "ast_data": "FunctionDef name:_warn_overlap arg:self arg:message arg:kwargs arguments arg arg arg Assign Compare Call Call Call Assign Call Call If Call" + }, + { + "library": "scipy", + "name": "_getnnz", + "source_code": "def _getnnz(self, axis=None):\n clsname = self.__class__.__name__\n raise NotImplementedError(f'getnnz not implemented for {clsname}.')", + "docstring": "Number of stored values, including explicit zeros. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Report stored values for the whole array, or along a specified axis. 
See also -------- count_nonzero : Number of non-zero entries", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_base.py", + "ast_data": "FunctionDef name:_getnnz arg:self arg:axis arguments arg arg Assign Raise Call" + }, + { + "library": "pandas", + "name": "is_nonnegative_int", + "source_code": "def is_nonnegative_int(value: object) -> None:\n if value is None:\n return\n elif isinstance(value, int):\n if value >= 0:\n return\n msg = 'Value must be a nonnegative integer or None'\n raise ValueError(msg)", + "docstring": "Verify that value is None or a positive int. Parameters ---------- value : None or int The to be checked. Raises ------ ValueError When the value is not None or is a negative integer", + "type": "function", + "file_path": "pandas\\pandas\\_config\\config.py", + "ast_data": "FunctionDef name:is_nonnegative_int arg:value arguments arg If Compare Return return:no If Call If Compare Return return:no Assign Raise Call" + }, + { + "library": "pytorch", + "name": "build_groups_memberships", + "source_code": "def build_groups_memberships(pg_config: Any) -> tuple[list[Group], dict[Any, Group], list[Membership], dict[str, set[Any]], dict[tuple[str, int], str]]:\n groups = []\n memberships = []\n _groups = {}\n _memberships = {}\n _pg_guids = {}\n for global_rank in pg_config:\n for pg_uid in pg_config[global_rank]:\n desc = pg_config[global_rank][pg_uid]['desc']\n ranks = ast.literal_eval(pg_config[global_rank][pg_uid]['ranks'])\n pg_guid = pg_uid + str(hash(frozenset(ranks)))\n _pg_guids[pg_uid, global_rank] = pg_guid\n if isinstance(ranks, str):\n ranks = eval(ranks)\n if pg_guid not in _groups:\n groups.append(Group(id=pg_guid, desc=desc, size=len(ranks)))\n for rank in ranks:\n memberships.append(Membership(group_id=pg_guid, global_rank=rank))\n _groups[pg_guid] = groups[-1]\n _memberships[pg_guid] = set(ranks)\n else:\n assert _groups[pg_guid].desc == desc, f'mismatch in desc {_groups[pg_guid].desc} vs {desc} for group {pg_guid}'\n assert _memberships[pg_guid] == set(ranks), f'mismatch in membership for group {pg_guid} {_memberships[pg_guid]} vs {set(ranks)}'\n return (groups, _groups, memberships, _memberships, _pg_guids)", + "docstring": "pg_config: { global_rank: { (pg_guid, desc, ranks) } } is a system generated id, but depending on the mode of PG creation it could be a globally incrementing int or a hash of the ranks. See in distributed_c10d.py. is provided by the user (optionally) and should be 'meaningful' (e.g. TP/PP/DP group) is a list of the 'global ranks' that are members of the PG. (pg_guid, desc, ranks) tuples are appended lazily to the flight buffer when is called on a PG and the flag is true for that PG. - the order of calling (init_process_group, new_group, etc) does not affect the order of the tuples in the list Returns: : a groups table where each row is a Group namedtuple. : a dict that is indexed by pg_guid with Group namedtuple as value. : a membership table where each row is a Membership namedtuple. : a dict that is indexed by pg_guid with set of ranks (int) as value. 
: a dict that is indexed by (pg_uid, global_rank) with pg_guid as value.", + "type": "function", + "file_path": "pytorch\\tools\\flight_recorder\\components\\builder.py", + "ast_data": "FunctionDef name:build_groups_memberships arg:pg_config arguments arg Assign Assign Assign Assign Assign For For Assign Assign Call Assign Call Call Call Assign If Call Assign Call If Compare Call Call Call For Call Call Assign Assign Call Compare Compare Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "Kernel", + "source_code": "@dataclasses.dataclass\nclass Kernel:\n func: Callable\n source: str\n\n def __call__(self, *args, **kwargs):\n return self.func(*args, **kwargs)", + "docstring": "Models a (function, source location)", + "type": "class", + "file_path": "pytorch\\torch\\_library\\utils.py", + "ast_data": "ClassDef name:Kernel FunctionDef name:__call__ arg:self arguments arg arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "_UFuncOutputCastingError", + "source_code": "@_display_as_base\nclass _UFuncOutputCastingError(_UFuncCastingError):\n\n def __init__(self, ufunc, casting, from_, to, i):\n super().__init__(ufunc, casting, from_, to)\n self.out_i = i\n\n def __str__(self):\n i_str = f'{self.out_i} ' if self.ufunc.nout != 1 else ''\n return f'Cannot cast ufunc {self.ufunc.__name__!r} output {i_str}from {self.from_!r} to {self.to!r} with casting rule {self.casting!r}'", + "docstring": "Thrown when a ufunc output cannot be casted", + "type": "class", + "file_path": "numpy\\numpy\\_core\\_exceptions.py", + "ast_data": "ClassDef name:_UFuncOutputCastingError FunctionDef name:__init__ arg:self arg:ufunc arg:casting arg:from_ arg:to arg:i arguments arg arg arg arg arg arg Call Call Assign FunctionDef name:__str__ arg:self arguments arg Assign Compare Return return:yes" + }, + { + "library": "scikit-learn", + "name": "idf_", + "source_code": "@property\ndef idf_(self):\n if not hasattr(self, '_tfidf'):\n raise NotFittedError(f\"{self.__class__.__name__} is not fitted yet. Call 'fit' with appropriate arguments before using this attribute.\")\n return self._tfidf.idf_", + "docstring": "Inverse document frequency vector, only defined if . 
Returns ------- ndarray of shape (n_features,)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py", + "ast_data": "FunctionDef name:idf_ arg:self arguments arg If Call Raise Call Return return:yes" + }, + { + "library": "django", + "name": "static", + "source_code": "def static(request):\n return {'STATIC_URL': settings.STATIC_URL}", + "docstring": "Add static-related context variables to the context.", + "type": "function", + "file_path": "django\\django\\template\\context_processors.py", + "ast_data": "FunctionDef name:static arg:request arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_width", + "source_code": "def set_width(self, w):\n self._width = w\n self.stale = True", + "docstring": "Set the width of the rectangle.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:set_width arg:self arg:w arguments arg arg Assign Assign" + }, + { + "library": "pygame", + "name": "__init__", + "source_code": "def __init__(self, device=0, size=(640, 480), mode='RGB', api_preference=None):\n self._device_index = device\n self._size = size\n self.api_preference = api_preference\n if api_preference is not None and sys.platform == 'win32':\n self.api_preference = cv2.CAP_DSHOW\n if mode == 'RGB':\n self._fmt = cv2.COLOR_BGR2RGB\n elif mode == 'YUV':\n self._fmt = cv2.COLOR_BGR2YUV\n elif mode == 'HSV':\n self._fmt = cv2.COLOR_BGR2HSV\n else:\n raise ValueError('Not a supported mode')\n self._open = False", + "docstring": "api_preference - cv2.CAP_DSHOW cv2.CAP_V4L2 cv2.CAP_MSMF and others # See", + "type": "method", + "file_path": "pygame\\src_py\\_camera_opencv.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:device arg:size arg:mode arg:api_preference arguments arg arg arg arg arg Assign Assign Assign If BoolOp Compare Compare Assign If Compare Assign If Compare Assign If Compare Assign Raise Call Assign" + }, + { + "library": "pytorch", + "name": "set_observed_to_quantized_mapping", + "source_code": "def set_observed_to_quantized_mapping(self, observed_class: type, quantized_class: type, quant_type: QuantType=QuantType.STATIC) -> ConvertCustomConfig:\n if quant_type not in self.observed_to_quantized_mapping:\n self.observed_to_quantized_mapping[quant_type] = {}\n self.observed_to_quantized_mapping[quant_type][observed_class] = quantized_class\n return self", + "docstring": "Set the mapping from a custom observed module class to a custom quantized module class. 
The quantized module class must have a `` class method that converts the observed module class to the quantized module class.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py", + "ast_data": "FunctionDef name:set_observed_to_quantized_mapping arg:self arg:observed_class arg:quantized_class arg:quant_type arguments arg arg arg arg If Compare Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_cache_output_metric_attributes", + "source_code": "def _cache_output_metric_attributes(self, metrics, weighted_metrics):\n output_shapes = []\n for output in self.outputs:\n if output is None or output.shape.rank is None:\n output_shapes.append(None)\n else:\n output_shapes.append(output.shape.as_list())\n self._per_output_metrics = training_utils_v1.collect_per_output_metric_info(metrics, self.output_names, output_shapes, self.loss_functions, from_serialized=self._from_serialized)\n self._per_output_weighted_metrics = training_utils_v1.collect_per_output_metric_info(weighted_metrics, self.output_names, output_shapes, self.loss_functions, from_serialized=self._from_serialized, is_weighted=True)", + "docstring": "Caches metric name and function attributes for every model output.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py", + "ast_data": "FunctionDef name:_cache_output_metric_attributes arg:self arg:metrics arg:weighted_metrics arguments arg arg arg Assign For If BoolOp Compare Compare Call Call Call Assign Call Assign Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, fig, *, cmap=None, norm=None, colorizer=None, offsetx=0, offsety=0, origin=None, **kwargs):\n super().__init__(None, norm=norm, cmap=cmap, colorizer=colorizer, origin=origin)\n self.set_figure(fig)\n self.ox = offsetx\n self.oy = offsety\n self._internal_update(kwargs)\n self.magnification = 1.0", + "docstring": "cmap is a colors.Colormap instance norm is a colors.Normalize instance to map luminance to 0-1 kwargs are an optional list of Artist keyword args", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:fig arguments arg arg arg arg arg arg arg arg arg Call Call Call Assign Assign Call Assign" + }, + { + "library": "pytorch", + "name": "jvp", + "source_code": "@staticmethod\ndef jvp(ctx: Any, *grad_inputs: Any) -> Any:\n raise NotImplementedError('You must implement the jvp function for custom autograd.Function to use it with forward mode AD.')", + "docstring": "Define a formula for differentiating the operation with forward mode automatic differentiation. This function is to be overridden by all subclasses. It must accept a context :attr: as the first argument, followed by as many inputs as the :func: got (None will be passed in for non tensor inputs of the forward function), and it should return as many tensors as there were outputs to :func:. Each argument is the gradient w.r.t the given input, and each returned value should be the gradient w.r.t. the corresponding output. If an output is not a Tensor or the function is not differentiable with respect to that output, you can just pass None as a gradient for that input. 
You can use the :attr: object to pass any value from the forward to this functions.", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\function.py", + "ast_data": "FunctionDef name:jvp arg:ctx arguments arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_input_index", + "source_code": "def _input_index(op, handle):\n for i, t in enumerate(op.inputs):\n if handle is t:\n return i\n raise ValueError(f'{handle!s} not in list of inputs for op: {op!r}')", + "docstring": "Returns the index of in . Args: op: Operation. handle: Resource handle. Returns: Index in receiving the resource . Raises: ValueError: If handle and its replicated input are both not found in .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\auto_control_deps_utils.py", + "ast_data": "FunctionDef name:_input_index arg:op arg:handle arguments arg arg For Call If Compare Return return:yes Raise Call" + }, + { + "library": "scikit-learn", + "name": "_use_cache", + "source_code": "def _use_cache(self, estimator):\n if len(self._scorers) == 1:\n return False\n counter = Counter([_check_response_method(estimator, scorer._response_method).__name__ for scorer in self._scorers.values() if isinstance(scorer, _BaseScorer)])\n if any((val > 1 for val in counter.values())):\n return True\n return False", + "docstring": "Return True if using a cache is beneficial, thus when a response method will be called several time.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py", + "ast_data": "FunctionDef name:_use_cache arg:self arg:estimator arguments arg arg If Compare Call Return return:yes Assign Call Call Call Call If Call Compare Call Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "file_complete", + "source_code": "def file_complete(self, file_size):\n raise NotImplementedError('subclasses of FileUploadHandler must provide a file_complete() method')", + "docstring": "Signal that a file has completed. File size corresponds to the actual size accumulated by all the chunks. Subclasses should return a valid `` object.", + "type": "method", + "file_path": "django\\django\\core\\files\\uploadhandler.py", + "ast_data": "FunctionDef name:file_complete arg:self arg:file_size arguments arg arg Raise Call" + }, + { + "library": "django", + "name": "country", + "source_code": "def country(self, query):\n response = self._query(query, require_city=False)\n return {'continent_code': response.continent.code, 'continent_name': response.continent.name, 'country_code': response.country.iso_code, 'country_name': response.country.name, 'is_in_european_union': response.country.is_in_european_union}", + "docstring": "Return a dictionary with the country code and name when given an IP address or a Fully Qualified Domain Name (FQDN). For example, both '24.124.1.80' and 'djangoproject.com' are valid parameters.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geoip2.py", + "ast_data": "FunctionDef name:country arg:self arg:query arguments arg arg Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "pts_to_poststep", + "source_code": "def pts_to_poststep(x, *args):\n steps = np.zeros((1 + len(args), max(2 * len(x) - 1, 0)))\n steps[0, 0::2] = x\n steps[0, 1::2] = steps[0, 2::2]\n steps[1:, 0::2] = args\n steps[1:, 1::2] = steps[1:, 0:-2:2]\n return steps", + "docstring": "Convert continuous line to post-steps. Given a set of ``, the length will be 0. 
Examples -------- >>> x_s, y1_s, y2_s = pts_to_poststep(x, y1, y2)", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:pts_to_poststep arg:x arguments arg arg Assign Call Call Call Call Assign Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "from_config", + "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n _check_config_keys(config, cls._fields)\n kwargs = _standardize_and_copy_config(config)\n kwargs['dtype'] = dtypes.as_dtype(config['dtype'])\n return cls(**kwargs)", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "DynamoConfigPatchVariable", + "source_code": "class DynamoConfigPatchVariable(ContextWrappingVariable):\n\n def __init__(self, target_values, **kwargs) -> None:\n target_values = tuple(target_values.items())\n super().__init__(target_values=(target_values,), initial_values=None, **kwargs)\n self.initial_values = {}\n for key, _ in target_values:\n self.initial_values[key] = torch._dynamo.config.__getattr__(key)\n self.initial_values = (tuple(self.initial_values.items()),)\n\n def enter(self, tx):\n self.set_cleanup_hook(tx)\n self._call_func(tx, self.target_values)\n return variables.ConstantVariable.create(None)\n\n def exit(self, tx: 'InstructionTranslator', *args):\n self._call_func(tx, self.initial_values)\n return variables.ConstantVariable.create(None)\n\n def _call_func(self, tx: 'InstructionTranslator', values):\n assert len(values) == 1\n value = values[0]\n for key, val in value:\n torch._dynamo.config.__setattr__(key, val)\n\n def module_name(self):\n return 'torch._dynamo'\n\n def fn_name(self):\n return 'patch_dynamo_config'", + "docstring": "represents torch._dynamo.patch_dynamo_config", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\variables\\ctx_manager.py", + "ast_data": "ClassDef name:DynamoConfigPatchVariable FunctionDef name:__init__ arg:self arg:target_values arguments arg arg arg Assign Call Call Call Call Assign For Assign Call Assign Call Call FunctionDef name:enter arg:self arg:tx arguments arg arg Call Call Return return:yes Call FunctionDef name:exit arg:self arg:tx arguments arg arg arg Call Return return:yes Call FunctionDef name:_call_func arg:self arg:tx arg:values arguments arg arg arg Compare Call Assign For Call FunctionDef name:module_name arg:self arguments arg Return return:yes FunctionDef name:fn_name arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "_Edge_integer", + "source_code": "class _Edge_integer:\n\n def __init__(self, step, offset):\n if step <= 0:\n raise ValueError(\"'step' must be positive\")\n self.step = step\n self._offset = abs(offset)\n\n def closeto(self, ms, edge):\n if self._offset > 0:\n digits = np.log10(self._offset / self.step)\n tol = max(1e-10, 10 ** (digits - 12))\n tol = min(0.4999, tol)\n else:\n tol = 1e-10\n return abs(ms - edge) < tol\n\n def le(self, x):\n d, m = divmod(x, self.step)\n if self.closeto(m / self.step, 1):\n return d + 1\n return d\n\n def ge(self, x):\n d, m = divmod(x, self.step)\n if self.closeto(m / self.step, 0):\n return d\n return d + 1", + "docstring": 
"Helper for , , etc. Take floating-point precision limitations into account when calculating tick locations as integer multiples of a step.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "ClassDef name:_Edge_integer FunctionDef name:__init__ arg:self arg:step arg:offset arguments arg arg arg If Compare Raise Call Assign Assign Call FunctionDef name:closeto arg:self arg:ms arg:edge arguments arg arg arg If Compare Assign Call Assign Call Assign Call Assign Return return:yes Compare Call FunctionDef name:le arg:self arg:x arguments arg arg Assign Call If Call Return return:yes Return return:yes FunctionDef name:ge arg:self arg:x arguments arg arg Assign Call If Call Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_neg_log_likelihood", + "source_code": "def _neg_log_likelihood(lmbda):\n x_trans = self._yeo_johnson_transform(x, lmbda)\n n_samples = x.shape[0]\n x_trans_var = x_trans.var()\n if x_trans_var < x_tiny:\n return np.inf\n log_var = np.log(x_trans_var)\n loglike = -n_samples / 2 * log_var\n loglike += (lmbda - 1) * (np.sign(x) * np.log1p(np.abs(x))).sum()\n return -loglike", + "docstring": "Return the negative log likelihood of the observed data x as a function of lambda.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:_neg_log_likelihood arg:lmbda arguments arg Assign Call Assign Assign Call If Compare Return return:yes Assign Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "id_in_cluster", + "source_code": "def id_in_cluster(cluster_spec, task_type, task_id):\n _validate_cluster_spec(cluster_spec, task_type, task_id)\n cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()\n if task_type == 'chief':\n return 0\n if task_type == 'worker':\n return task_id + len(cluster_spec.get('chief', []))\n if task_type == 'evaluator':\n return task_id\n raise ValueError('There is no id for task_type %r' % task_type)", + "docstring": "Returns a unique id for the task in the 's cluster. It returns an id ranging from [0, ). Note: this function assumes that \"evaluate\" job is in its own cluster or its own partition of a cluster. Args: cluster_spec: a dict, or object to be validated. task_type: string indicating the type of the task. task_id: the id of the in this cluster. Returns: an int indicating the unique id. Throws: ValueError: if is not \"chief\", \"worker\" or \"evaluator\".", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py", + "ast_data": "FunctionDef name:id_in_cluster arg:cluster_spec arg:task_type arg:task_id arguments arg arg arg Call Assign Call Call If Compare Return return:yes If Compare Return return:yes Call Call If Compare Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "DispatchCacheInfo", + "source_code": "@dataclass_slots\n@dataclass(frozen=True)\nclass DispatchCacheInfo:\n hits: int\n misses: int\n bypasses: dict[str, int]\n size: int", + "docstring": "Information about the state of the FakeTensor dispatch cache.", + "type": "class", + "file_path": "pytorch\\torch\\_subclasses\\fake_tensor.py", + "ast_data": "ClassDef name:DispatchCacheInfo Call" + }, + { + "library": "tensorflow", + "name": "sin", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef sin(x):\n return math_ops.sin(x)", + "docstring": "Computes sin of x element-wise. 
Args: x: Tensor or variable. Returns: A tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:sin arg:x arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_Entrypoint", + "source_code": "@dataclasses.dataclass(frozen=True)\nclass _Entrypoint:\n module: str\n name: str\n exported_symbol: exported_api.ExportedSymbol\n\n def get_import(self, file_prefixes_to_strip: Sequence[str], module_prefix: str, use_lazy_loading: bool) -> str:\n module_import_path = _get_import_path(self.exported_symbol.file_name, file_prefixes_to_strip, module_prefix)\n alias = ''\n symbol_name = self.exported_symbol.symbol_name\n if self.name != symbol_name:\n alias = f' as {self.name}'\n if not use_lazy_loading:\n return f'from {module_import_path} import {symbol_name}{alias} # line: {self.exported_symbol.line_no}'\n else:\n return f\" '{self.name}': ('{module_import_path}', '{symbol_name}'), # line: {self.exported_symbol.line_no}\"", + "docstring": "An entrypoint that was exposed by the use of a decorator. Attributes: module: The public module that the symbol was exposed to. For example: tensorflow.io. name: The name the symbol was exported as. For example: decode_png. exported_symbol: The symbol that this entrypoint refers back to.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\generator\\generator.py", + "ast_data": "ClassDef name:_Entrypoint FunctionDef name:get_import arg:self arg:file_prefixes_to_strip arg:module_prefix arg:use_lazy_loading arguments arg arg arg arg Assign Call Assign Assign If Compare Assign If Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_init", + "source_code": "def _init(self, graph: 'Graph') -> None:\n self.graph = graph\n self._original_op = None\n self._inputs_val = None\n self._device_code_locations = None\n self._colocation_code_locations = None\n self._control_flow_context = self.graph._get_control_flow_context()\n self._gradient_function = None\n self._init_outputs()\n self._id_value = self.graph._add_op(self)", + "docstring": "Initializes Operation from a TF_Operation.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_init arg:self arg:graph arguments arg arg Assign Assign Assign Assign Assign Assign Call Assign Call Assign Call" + }, + { + "library": "pandas", + "name": "validate_periods", + "source_code": "def validate_periods(periods: int | None) -> int | None:\n if periods is not None and (not lib.is_integer(periods)):\n raise TypeError(f'periods must be an integer, got {periods}')\n return periods", + "docstring": "If a argument is passed to the Datetime/Timedelta Array/Index constructor, cast it to an integer. 
Parameters ---------- periods : None, int Returns ------- periods : None or int Raises ------ TypeError if periods is not None or int", + "type": "function", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:validate_periods arg:periods arguments arg If BoolOp Compare Call Raise Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "translated", + "source_code": "def translated(self, tx, ty):\n return Bbox(self._points + (tx, ty))", + "docstring": "Construct a by translating this one by *tx* and *ty*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:translated arg:self arg:tx arg:ty arguments arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "normalize_path_patterns", + "source_code": "def normalize_path_patterns(patterns):\n patterns = [os.path.normcase(p) for p in patterns]\n dir_suffixes = {'%s*' % path_sep for path_sep in {'/', os.sep}}\n norm_patterns = []\n for pattern in patterns:\n for dir_suffix in dir_suffixes:\n if pattern.endswith(dir_suffix):\n norm_patterns.append(pattern.removesuffix(dir_suffix))\n break\n else:\n norm_patterns.append(pattern)\n return norm_patterns", + "docstring": "Normalize an iterable of glob style patterns based on OS.", + "type": "function", + "file_path": "django\\django\\core\\management\\utils.py", + "ast_data": "FunctionDef name:normalize_path_patterns arg:patterns arguments arg Assign Call Assign Assign For For If Call Call Call Call Return return:yes" + }, + { + "library": "authlib", + "name": "authenticate_token", + "source_code": "def authenticate_token(self, request):\n raise NotImplementedError()", + "docstring": "Authenticate current credential who is requesting to register a client. Developers MUST implement this method in subclass:: def authenticate_token(self, request): auth = request.headers.get(\"Authorization\") return get_token_by_auth(auth) :return: token instance", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7591\\endpoint.py", + "ast_data": "FunctionDef name:authenticate_token arg:self arg:request arguments arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "set_issue_status", + "source_code": "def set_issue_status(self, repo: str, issue_number: int, status: str) -> requests.Response:\n endpoint = f'repos/{repo}/issues/{issue_number}'\n return self._make_request('POST', endpoint, status=status)", + "docstring": "Sets the status of an issue (or PR). an-issue Arguments: repo: a string of the form , e.g. openxla/xla issue_number: the issue (or PR) to set the status of status: the status to set Returns: a requests.Response object containing the response from the API. Raises: requests.exceptions.HTTPError", + "type": "method", + "file_path": "tensorflow\\third_party\\xla\\.github\\workflows\\github_api.py", + "ast_data": "FunctionDef name:set_issue_status arg:self arg:repo arg:issue_number arg:status arguments arg arg arg arg Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "then", + "source_code": "def then(self, callback: Callable[[Future[T]], S]) -> Future[S]:\n return cast(Future[S], super().then(callback))", + "docstring": "Append the given callback function to this `valuewait` will be marked appropriately with the encountered error. 
However, if this callback later completes additional futures, those futures are not marked as completed with an error and the user is responsible for handling completion/waiting on those futures independently. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) >>> def callback(fut): ... print(f\"RPC return value is {fut.wait()}.\") >>> fut = torch.futures.Future() >>> # The inserted callback will print the return value when >>> # receiving the response from \"worker1\" >>> cb_fut = fut.then(callback) >>> chain_cb_fut = cb_fut.then( ... lambda x : print(f\"Chained cb done. {x.wait()}\") ... ) >>> fut.set_result(5) RPC return value is 5. Chained cb done. None", + "type": "method", + "file_path": "pytorch\\torch\\futures\\__init__.py", + "ast_data": "FunctionDef name:then arg:self arg:callback arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "sharez", + "source_code": "def sharez(self, other):\n _api.check_isinstance(Axes3D, other=other)\n if self._sharez is not None and other is not self._sharez:\n raise ValueError('z-axis is already shared')\n self._shared_axes['z'].join(self, other)\n self._sharez = other\n self.zaxis.major = other.zaxis.major\n self.zaxis.minor = other.zaxis.minor\n z0, z1 = other.get_zlim()\n self.set_zlim(z0, z1, emit=False, auto=other.get_autoscalez_on())\n self.zaxis._scale = other.zaxis._scale", + "docstring": "Share the z-axis with *other*. This is equivalent to passing `` when constructing the Axes, and cannot be used if the z-axis is already being shared with another Axes. Note that it is not possible to unshare axes.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:sharez arg:self arg:other arguments arg arg Call If BoolOp Compare Compare Raise Call Call Assign Assign Assign Assign Call Call Call Assign" + }, + { + "library": "pandas", + "name": "apply", + "source_code": "def apply(self, f, align_keys: list[str] | None=None, **kwargs) -> Self:\n assert 'filter' not in kwargs\n align_keys = align_keys or []\n result_blocks: list[Block] = []\n aligned_args = {k: kwargs[k] for k in align_keys}\n for b in self.blocks:\n if aligned_args:\n for k, obj in aligned_args.items():\n if isinstance(obj, (ABCSeries, ABCDataFrame)):\n if obj.ndim == 1:\n kwargs[k] = obj.iloc[b.mgr_locs.indexer]._values\n else:\n kwargs[k] = obj.iloc[:, b.mgr_locs.indexer]._values\n else:\n kwargs[k] = obj[b.mgr_locs.indexer]\n if callable(f):\n applied = b.apply(f, **kwargs)\n else:\n applied = getattr(b, f)(**kwargs)\n result_blocks = extend_blocks(applied, result_blocks)\n out = type(self).from_blocks(result_blocks, self.axes)\n return out", + "docstring": "Iterate over the blocks, collect and create a new BlockManager. Parameters ---------- f : str or callable Name of the Block method to apply. 
align_keys: List[str] or None, default None **kwargs Keywords to pass to Returns ------- BlockManager", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:apply arg:self arg:f arg:align_keys arguments arg arg arg arg Compare Assign BoolOp Assign For If For Call If Call If Compare Assign Assign Assign If Call Assign Call Assign Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "CapStyle", + "source_code": "class CapStyle(str, Enum):\n butt = 'butt'\n projecting = 'projecting'\n round = 'round'\n\n @staticmethod\n def demo():\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(4, 1.2))\n ax = fig.add_axes([0, 0, 1, 0.8])\n ax.set_title('Cap style')\n for x, style in enumerate(['butt', 'round', 'projecting']):\n ax.text(x + 0.25, 0.85, style, ha='center')\n xx = [x, x + 0.5]\n yy = [0, 0]\n ax.plot(xx, yy, lw=12, color='tab:blue', solid_capstyle=style)\n ax.plot(xx, yy, lw=1, color='black')\n ax.plot(xx, yy, 'o', color='tab:red', markersize=3)\n ax.set_ylim(-0.5, 1.5)\n ax.set_axis_off()\n fig.show()", + "docstring": "Define how the two endpoints (caps) of an unclosed line are drawn. How to draw the start and end points of lines that represent a closed curve (i.e. that end in a ) is controlled by the line's . For all other lines, how the start and end points are drawn is controlled by the *CapStyle*. For a visual impression of each *CapStyle*, or run . By default, draws a stroked line as squared off at its endpoints. **Supported values:** .. rst-class:: value-list 'butt' the line is squared off at its endpoint. 'projecting' the line is squared off as in *butt*, but the filled in area extends beyond the endpoint a distance of ``. .. plot:: :alt: Demo of possible CapStyle's from matplotlib._enums import CapStyle CapStyle.demo()", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\_enums.py", + "ast_data": "ClassDef name:CapStyle Assign Assign Assign FunctionDef name:demo arguments Assign Call Assign Call Call For Call Call Assign Assign Call Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "init_from_model", + "source_code": "@classmethod\ndef init_from_model(cls, py_model_setup: Optional[str]=None, cpp_model_setup: Optional[str]=None, setup: GroupedSetup=GroupedSetup(), signature: Optional[str]=None, torchscript: bool=False, autograd: bool=False, num_threads: Union[int, tuple[int, ...]]=1) -> 'GroupedBenchmark':\n signature_args, signature_output = cls._parse_signature(signature)\n if signature_args is None:\n raise ValueError('signature is needed when initializing from model definitions.')\n return cls(*cls._make_model_invocation(signature_args, signature_output, RuntimeMode.EAGER), py_model_setup=py_model_setup, cpp_model_setup=cpp_model_setup, inferred_model_setup=False, setup=setup, signature_args=signature_args, signature_output=signature_output, torchscript=torchscript, autograd=autograd, num_threads=(num_threads,) if isinstance(num_threads, int) else num_threads)", + "docstring": "Create a set of benchmarks using torch.nn Modules. This method of benchmark creation takes setup code, and then calls a model rather than a free form block of code. As a result, there are two additional requirements compared to : - must be provided. 
- A model (named \"model\") must be defined, either with or in Python or in C++.", + "type": "method", + "file_path": "pytorch\\benchmarks\\instruction_counts\\core\\api.py", + "ast_data": "FunctionDef name:init_from_model arg:cls arg:py_model_setup arg:cpp_model_setup arg:setup arg:signature arg:torchscript arg:autograd arg:num_threads arguments arg arg arg arg arg arg arg arg Call Assign Call If Compare Raise Call Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "multilabel_", + "source_code": "@property\ndef multilabel_(self):\n return self.label_binarizer_.y_type_.startswith('multilabel')", + "docstring": "Whether this is a multilabel classifier.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\multiclass.py", + "ast_data": "FunctionDef name:multilabel_ arg:self arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "fundamental_from_essential", + "source_code": "def fundamental_from_essential(E_mat: Tensor, K1: Tensor, K2: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(E_mat, ['*', '3', '3'])\n KORNIA_CHECK_SHAPE(K1, ['*', '3', '3'])\n KORNIA_CHECK_SHAPE(K2, ['*', '3', '3'])\n if not len(E_mat.shape[:-2]) == len(K1.shape[:-2]) == len(K2.shape[:-2]):\n raise AssertionError\n return safe_inverse_with_mask(K2)[0].transpose(-2, -1) @ E_mat @ safe_inverse_with_mask(K1)[0]", + "docstring": "Get the Fundamental matrix from Essential and camera matrices. Uses the method from Hartley/Zisserman 9.6 pag 257 (formula 9.12). Args: E_mat: The essential matrix with shape of :math:. K1: The camera matrix from first camera with shape :math:. K2: The camera matrix from second camera with shape :math:. Returns: The fundamental matrix with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\epipolar\\fundamental.py", + "ast_data": "FunctionDef name:fundamental_from_essential arg:E_mat arg:K1 arg:K2 arguments arg arg arg Call Call Call If Compare Call Call Call Raise Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "is_slash_name", + "source_code": "def is_slash_name(self):\n return False", + "docstring": "Is this a name token that starts with a slash?", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_type1font.py", + "ast_data": "FunctionDef name:is_slash_name arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "WayburnSeader02", + "source_code": "class WayburnSeader02(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-500.0] * self.N, [500.0] * self.N))\n self.custom_bounds = ([-1, 2], [-1, 2])\n self.global_optimum = [[0.2, 1.0]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n u = (1.613 - 4 * (x[0] - 0.3125) ** 2 - 4 * (x[1] - 1.625) ** 2) ** 2\n v = (x[1] - 1) ** 2\n return u + v", + "docstring": "Wayburn and Seader 2 objective function. This class defines the Wayburn and Seader 2 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{WayburnSeader02}}(x) = \\left[ 1.613 - 4(x_1 - 0.3125)^2 - 4(x_2 - 1.625)^2 \\right]^2 + (x_2 - 1)^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_W.py", + "ast_data": "ClassDef name:WayburnSeader02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "FixedLengthRecordDatasetV1", + "source_code": "@tf_export(v1=['data.FixedLengthRecordDataset'])\nclass FixedLengthRecordDatasetV1(dataset_ops.DatasetV1Adapter):\n\n def __init__(self, filenames, record_bytes, header_bytes=None, footer_bytes=None, buffer_size=None, compression_type=None, num_parallel_reads=None, name=None):\n wrapped = FixedLengthRecordDatasetV2(filenames, record_bytes, header_bytes, footer_bytes, buffer_size, compression_type, num_parallel_reads, name=name)\n super(FixedLengthRecordDatasetV1, self).__init__(wrapped)\n __init__.__doc__ = FixedLengthRecordDatasetV2.__init__.__doc__\n\n @property\n def _filenames(self):\n return self._dataset._filenames\n\n @_filenames.setter\n def _filenames(self, value):\n self._dataset._filenames = value", + "docstring": "A of fixed-length records from one or more binary files.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py", + "ast_data": "ClassDef name:FixedLengthRecordDatasetV1 FunctionDef name:__init__ arg:self arg:filenames arg:record_bytes arg:header_bytes arg:footer_bytes arg:buffer_size arg:compression_type arg:num_parallel_reads arg:name arguments arg arg arg arg arg arg arg arg arg Assign Call Call Call Assign FunctionDef name:_filenames arg:self arguments arg Return return:yes FunctionDef name:_filenames arg:self arg:value arguments arg arg Assign Call" + }, + { + "library": "pytorch", + "name": "softmax", + "source_code": "def softmax(input: Tensor, dim: Optional[int]=None, _stacklevel: int=3, dtype: Optional[DType]=None) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(softmax, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype)\n if dim is None:\n dim = _get_softmax_dim('softmax', input.dim(), _stacklevel)\n if dtype is None:\n ret = input.softmax(dim)\n else:\n ret = input.softmax(dim, dtype=dtype)\n return ret", + "docstring": "Apply a softmax function. Softmax is defined as: :math: It is applied to all slices along dim, and will re-scale them so that the elements lie in the range and sum to 1. See :class: for more details. Args: input (Tensor): input dim (int): A dimension along which softmax will be computed. dtype (:class:, optional): the desired data type of returned tensor. If specified, the input tensor is casted to :attr: before the operation is performed. This is useful for preventing data type overflows. Default: None. .. note:: This function doesn't work directly with NLLLoss, which expects the Log to be computed between the Softmax and itself. 
Use log_softmax instead (it's faster and has better numerical properties).", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:softmax arg:input arg:dim arg:_stacklevel arg:dtype arguments arg arg arg arg If Call Return return:yes Call If Compare Assign Call Call If Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "window_hanning", + "source_code": "def window_hanning(x):\n return np.hanning(len(x)) * x", + "docstring": "Return *x* times the Hanning (or Hann) window of len(*x*). See Also -------- window_none : Another window algorithm.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\mlab.py", + "ast_data": "FunctionDef name:window_hanning arg:x arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "ObjectGraphProtoPrettyPrinter", + "source_code": "class ObjectGraphProtoPrettyPrinter:\n __slots__ = ['_object_graph_proto', '_node_name_cache']\n\n def __init__(self, object_graph_proto):\n self._object_graph_proto = object_graph_proto\n self._node_name_cache = None\n\n @property\n def node_names(self):\n if self._node_name_cache is not None:\n return self._node_name_cache\n path_to_root = {}\n path_to_root[0] = ('(root)',)\n to_visit = collections.deque([0])\n while to_visit:\n node_id = to_visit.popleft()\n obj = self._object_graph_proto.nodes[node_id]\n for child in obj.children:\n if child.node_id not in path_to_root:\n path_to_root[child.node_id] = path_to_root[node_id] + (child.local_name,)\n to_visit.append(child.node_id)\n node_names = {}\n for node_id, path_to_root in path_to_root.items():\n node_names[node_id] = '.'.join(path_to_root)\n for node_id, node in enumerate(self._object_graph_proto.nodes):\n for slot_reference in node.slot_variables:\n node_names[slot_reference.slot_variable_node_id] = f\"{node_names[node_id]}'s state '{slot_reference.slot_name}' for {node_names[slot_reference.original_variable_node_id]}\"\n self._node_name_cache = node_names\n return node_names", + "docstring": "Lazily traverses an object graph proto to pretty print names. If no calls to are made this object has no performance overhead. On the other hand, it will only traverse the object graph once, so repeated naming is cheap after the first.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "ClassDef name:ObjectGraphProtoPrettyPrinter Assign FunctionDef name:__init__ arg:self arg:object_graph_proto arguments arg arg Assign Assign FunctionDef name:node_names arg:self arguments arg If Compare Return return:yes Assign Assign Assign Call While Assign Call Assign For If Compare Assign Call Assign For Call Assign Call For Call For Assign Assign Return return:yes" + }, + { + "library": "kornia", + "name": "_range_bound", + "source_code": "def _range_bound(factor: Union[Tensor, float, Tuple[float, float], List[float]], name: str, center: Optional[float]=0.0, bounds: Optional[Tuple[float, float]]=(0, float('inf')), check: Optional[str]='joint', device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor:\n if device is None:\n device = torch.device('cpu')\n if dtype is None:\n dtype = torch.get_default_dtype()\n if not isinstance(factor, Tensor):\n factor = tensor(factor, device=device, dtype=dtype)\n factor_bound: Tensor\n if factor.dim() == 0:\n if factor < 0:\n raise ValueError(f'If {name} is a single number, it must be non negative. 
Got {factor}.')\n if center is None or bounds is None:\n raise ValueError(f'`center` and `bounds` cannot be None for single number. Got {center}, {bounds}.')\n factor_bound = factor.repeat(2) * tensor([-1.0, 1.0], device=factor.device, dtype=factor.dtype) + center\n factor_bound = factor_bound.clamp(bounds[0], bounds[1]).to(device=device, dtype=dtype)\n else:\n factor_bound = as_tensor(factor, device=device, dtype=dtype)\n if check is not None:\n if check == 'joint':\n _joint_range_check(factor_bound, name, bounds)\n elif check == 'singular':\n _singular_range_check(factor_bound, name, bounds)\n else:\n raise NotImplementedError(f\"methods '{check}' not implemented.\")\n return factor_bound", + "docstring": "Check inputs and compute the corresponding factor bounds.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\utils\\param_validation.py", + "ast_data": "FunctionDef name:_range_bound arg:factor arg:name arg:center arg:bounds arg:check arg:device arg:dtype arguments arg arg arg arg arg arg arg Call If Compare Assign Call If Compare Assign Call If Call Assign Call If Compare Call If Compare Raise Call If BoolOp Compare Compare Raise Call Assign Call Call Assign Call Call Assign Call If Compare If Compare Call If Compare Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_canonical_name_for_symbol", + "source_code": "def get_canonical_name_for_symbol(symbol: Any, api_name: str=TENSORFLOW_API_NAME, add_prefix_to_v1_names: bool=False) -> Optional[str]:\n if not hasattr(symbol, '__dict__'):\n return None\n api_names_attr = API_ATTRS[api_name].names\n _, undecorated_symbol = tf_decorator.unwrap(symbol)\n if api_names_attr not in undecorated_symbol.__dict__:\n return None\n api_names = getattr(undecorated_symbol, api_names_attr)\n deprecated_api_names = undecorated_symbol.__dict__.get('_tf_deprecated_api_names', [])\n canonical_name = get_canonical_name(api_names, deprecated_api_names)\n if canonical_name:\n return canonical_name\n api_names_attr = API_ATTRS_V1[api_name].names\n api_names = getattr(undecorated_symbol, api_names_attr)\n v1_canonical_name = get_canonical_name(api_names, deprecated_api_names)\n if add_prefix_to_v1_names:\n return 'compat.v1.%s' % v1_canonical_name\n return v1_canonical_name", + "docstring": "Get canonical name for the API symbol. Example: Args: symbol: API function or class. api_name: API name. Currently, only . add_prefix_to_v1_names: Specifies whether a name available only in V1 should be prefixed with compat.v1. Returns: Canonical name for the API symbol (for e.g. initializers.zeros) if canonical name could be determined. 
Otherwise, returns None.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_export.py", + "ast_data": "FunctionDef name:get_canonical_name_for_symbol arg:symbol arg:api_name arg:add_prefix_to_v1_names arguments arg arg arg If Call Return return:no Assign Assign Call If Compare Return return:no Assign Call Assign Call Assign Call If Return return:yes Assign Assign Call Assign Call If Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "logits", + "source_code": "@property\ndef logits(self):\n return self._logits", + "docstring": "Vector of coordinatewise logits.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\multinomial.py", + "ast_data": "FunctionDef name:logits arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "push", + "source_code": "def push(self, is_building_function, enter_context_fn, device_stack):\n self.stack.append(ContextSwitch(is_building_function, enter_context_fn, device_stack))", + "docstring": "Push metadata about a context switch onto the stack. A context switch can take any one of the two forms: installing a graph as the default graph, or entering the eager context. For each context switch, we record whether or not the entered context is building a function. Args: is_building_function: (bool.) Whether the context is building a function. enter_context_fn: (function.) A callable that executes the context switch. For example, or . device_stack: If applicable, the device function stack for this graph. When breaking out of graphs in init_scope, the innermost nonempty device stack is used. Eager contexts put here and the value is never used.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:push arg:self arg:is_building_function arg:enter_context_fn arg:device_stack arguments arg arg arg arg Call Call" + }, + { + "library": "pytorch", + "name": "total_average", + "source_code": "def total_average(self):\n total_stat = FunctionEventAvg()\n for evt in self:\n total_stat += evt\n total_stat.key = None\n total_stat.key = 'Total'\n return total_stat", + "docstring": "Averages all events. Returns: A FunctionEventAvg object.", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\profiler_util.py", + "ast_data": "FunctionDef name:total_average arg:self arguments arg Assign Call For Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "poly_collection_2d_to_3d", + "source_code": "def poly_collection_2d_to_3d(col, zs=0, zdir='z', axlim_clip=False):\n segments_3d, codes = _paths_to_3d_segments_with_codes(col.get_paths(), zs, zdir)\n col.__class__ = Poly3DCollection\n col.set_verts_and_codes(segments_3d, codes)\n col.set_3d_properties()\n col._axlim_clip = axlim_clip", + "docstring": "Convert a into a object. Parameters ---------- col : The collection to convert. zs : float or array of floats The location or locations to place the polygons in the collection along the *zdir* axis. Default: 0. zdir : {'x', 'y', 'z'} The axis in which to place the patches. Default: 'z'. 
See for a description of the values.", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:poly_collection_2d_to_3d arg:col arg:zs arg:zdir arg:axlim_clip arguments arg arg arg arg Assign Call Call Assign Call Call Assign" + }, + { + "library": "pandas", + "name": "__iter__", + "source_code": "def __iter__(self) -> Iterator[Any]:\n for i in range(len(self)):\n yield self[i]", + "docstring": "Iterate over elements of the array.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:__iter__ arg:self arguments arg For Call Call" + }, + { + "library": "kornia", + "name": "Solarize", + "source_code": "class Solarize(OperationBase):\n\n def __init__(self, initial_magnitude: Optional[float]=0.5, initial_probability: float=0.5, magnitude_range: Tuple[float, float]=(0.0, 1.0), temperature: float=0.1, symmetric_megnitude: bool=False) -> None:\n super().__init__(K.RandomSolarize(magnitude_range, additions=0.0, same_on_batch=False, p=initial_probability), initial_magnitude=[('thresholds', initial_magnitude)], temperature=temperature, symmetric_megnitude=symmetric_megnitude, gradient_estimator=STEFunction)", + "docstring": "Apply solarize operation. Args: initial_magnitude: the initial magnitude. initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. magnitude_range: the sampling range for random sampling and clamping the optimized symmetric_megnitude: if to randomly assign the magnitude as negative or not.magnitude. temperature: temperature for RelaxedBernoulli distribution used during training. Note: STE gradient estimator applied for back propagation.", + "type": "class", + "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py", + "ast_data": "ClassDef name:Solarize FunctionDef name:__init__ arg:self arg:initial_magnitude arg:initial_probability arg:magnitude_range arg:temperature arg:symmetric_megnitude arguments arg arg arg arg arg arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "histogram_summary", + "source_code": "@deprecated('2016-11-30', 'Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in.')\ndef histogram_summary(tag, values, collections=None, name=None):\n with ops.name_scope(name, 'HistogramSummary', [tag, values]) as scope:\n val = gen_logging_ops.histogram_summary(tag=tag, values=values, name=scope)\n _Collect(val, collections, [ops.GraphKeys.SUMMARIES])\n return val", + "docstring": "Outputs a protocol buffer with a histogram. This ops is deprecated. Please switch to tf.summary.histogram. For an explanation of why this op was deprecated, and information on how to migrate, look ['here']( The generated []( has one summary value containing a histogram for . This op reports an error if any value is not finite. Args: tag: A . 0-D. Tag to use for the summary value. values: A real numeric . Any shape. Values to use to build the histogram. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to . name: A name for the operation (optional). Returns: A scalar of type . 
The serialized protocol buffer.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\logging_ops.py", + "ast_data": "FunctionDef name:histogram_summary arg:tag arg:values arg:collections arg:name arguments arg arg arg arg With Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "spatial_2d_padding", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):\n assert len(padding) == 2\n assert len(padding[0]) == 2\n assert len(padding[1]) == 2\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n if data_format == 'channels_first':\n pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]\n else:\n pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]\n return array_ops.pad(x, pattern)", + "docstring": "Pads the 2nd and 3rd dimensions of a 4D tensor. Args: x: Tensor or variable. padding: Tuple of 2 tuples, padding pattern. data_format: One of or . Returns: A padded 4D tensor. Raises: ValueError: if is neither or .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:spatial_2d_padding arg:x arg:padding arg:data_format arguments arg arg arg Compare Call Compare Call Compare Call If Compare Assign Call If Compare Raise Call Call If Compare Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "_fr0", + "source_code": "def _fr0(a):\n if a.ndim == 0:\n a = a.copy()\n a.shape = (1,)\n return a", + "docstring": "fix rank-0 --> rank-1", + "type": "function", + "file_path": "numpy\\numpy\\_core\\getlimits.py", + "ast_data": "FunctionDef name:_fr0 arg:a arguments arg If Compare Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_allowed_types_are_sequence_types", + "source_code": "def _allowed_types_are_sequence_types(allowed_types: Iterable[ir.TypeProtocol]) -> bool:\n return all((isinstance(t, ir.SequenceType) for t in allowed_types))", + "docstring": "Check if all allowed types are Sequence types.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_building.py", + "ast_data": "FunctionDef name:_allowed_types_are_sequence_types arg:allowed_types arguments arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "trigger", + "source_code": "def trigger(self, sender, event, data=None):\n if self._toggled:\n self.disable(event)\n else:\n self.enable(event)\n self._toggled = not self._toggled", + "docstring": "Calls or based on value.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg If Call Call Assign" + }, + { + "library": "kornia", + "name": "is_intensity_only", + "source_code": "def is_intensity_only(self, strict: bool=True) -> bool:\n for arg in self.children():\n if isinstance(arg, (ImageSequential,)) and (not arg.is_intensity_only(strict)):\n return False\n elif isinstance(arg, (ImageSequential,)):\n pass\n elif isinstance(arg, K.IntensityAugmentationBase2D):\n pass\n elif strict:\n return False\n return True", + "docstring": "Check if all transformations are intensity-based. 
Args: strict: if strict is False, it will allow non-augmentation Modules to be passed. e.g. will be recognized as non-intensity module if strict is set to True. Note: patch processing would break the continuity of labels (e.g. bbounding boxes, masks).", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\image.py", + "ast_data": "FunctionDef name:is_intensity_only arg:self arg:strict arguments arg arg For Call If BoolOp Call Call Return return:yes If Call If Call If Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "_parse_tbody_tr", + "source_code": "def _parse_tbody_tr(self, table):\n raise AbstractMethodError(self)", + "docstring": "Return the list of tbody row elements from the parsed table element. HTML5 table bodies consist of either 0 or more elements (which only contain elements) or 0 or more elements. This method checks for both structures. Parameters ---------- table : a table element that contains row elements. Returns ------- list of node-like These are the row elements of a table.", + "type": "method", + "file_path": "pandas\\pandas\\io\\html.py", + "ast_data": "FunctionDef name:_parse_tbody_tr arg:self arg:table arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "is_impure", + "source_code": "@compatibility(is_backward_compatible=False)\ndef is_impure(self, impure_random: bool=True) -> bool:\n if self.op in {'placeholder', 'output'}:\n return True\n if self.op == 'call_function':\n schema = getattr(self.target, '_schema', None)\n if schema is not None and schema.is_mutable:\n return True\n if impure_random:\n if getattr(self.target, '_nondeterministic_seeded', False):\n return True\n return self.target in _side_effectful_functions\n if self.op == 'call_module':\n assert self.graph.owning_module is not None, 'self.graph.owning_module not set for purity check'\n target_mod = self.graph.owning_module.get_submodule(self.target)\n assert target_mod is not None, f'Did not find expected submodule target {self.target}'\n return getattr(target_mod, '_is_impure', False)\n return False", + "docstring": "Returns whether this op is impure, i.e. if its op is a placeholder or output, or if a call_function or call_module which is impure. Args: impure_random (bool): Whether to treat rand op as impure. 
Returns: bool: If the op is impure or not.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\node.py", + "ast_data": "FunctionDef name:is_impure arg:self arg:impure_random arguments arg arg If Compare Return return:yes If Compare Assign Call If BoolOp Compare Return return:yes If If Call Return return:yes Return return:yes Compare If Compare Compare Assign Call Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_apply_elementwise", + "source_code": "def _apply_elementwise(self, func: Callable) -> list[list[Any]]:\n return [[None if val is None else func(val) for val in chunk.to_numpy(zero_copy_only=False)] for chunk in self._pa_array.iterchunks()]", + "docstring": "Apply a callable to each element while maintaining the chunking structure.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py", + "ast_data": "FunctionDef name:_apply_elementwise arg:self arg:func arguments arg arg Return return:yes Compare Call Call Call" + }, + { + "library": "pandas", + "name": "maybe_reorder", + "source_code": "def maybe_reorder(arrays: list[ArrayLike], arr_columns: Index, columns: Index, index) -> tuple[list[ArrayLike], Index, Index | None]:\n if len(arrays):\n length = len(arrays[0])\n else:\n length = 0\n result_index = None\n if len(arrays) == 0 and index is None and (length == 0):\n result_index = default_index(0)\n arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length)\n return (arrays, arr_columns, result_index)", + "docstring": "If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a preemptive (cheap) reindex.", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:maybe_reorder arg:arrays arg:arr_columns arg:columns arg:index arguments arg arg arg arg If Call Assign Call Assign Assign If BoolOp Compare Call Compare Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "variable_shape", + "source_code": "@property\ndef variable_shape(self):\n return tensor_shape.TensorShape(self.shape)", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:variable_shape arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "scale_mm_epilogue", + "source_code": "def scale_mm_epilogue():\n\n def epilogue(acc, inv_a_scale, inv_b_scale, bias=None):\n mul_scales = V.ops.mul(inv_a_scale, inv_b_scale)\n mul_acc = V.ops.mul(acc, mul_scales)\n if bias is not None:\n return V.ops.add(mul_acc, bias)\n else:\n return mul_acc\n return epilogue", + "docstring": "Create an epilogue function that applies scaling to matrix multiplication result using the given scale factors. 
Args: dtype: The data type of the output scale_a: Scale factor for matrix A scale_b: Scale factor for matrix B Returns: Epilogue function that takes the accumulator and applies scaling", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\kernel\\mm_common.py", + "ast_data": "FunctionDef name:scale_mm_epilogue arguments FunctionDef name:epilogue arg:acc arg:inv_a_scale arg:inv_b_scale arg:bias arguments arg arg arg arg Assign Call Assign Call If Compare Return return:yes Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "filecmp", + "source_code": "def filecmp(filename_a, filename_b):\n size_a = FileIO(filename_a, 'rb').size()\n size_b = FileIO(filename_b, 'rb').size()\n if size_a != size_b:\n return False\n crc_a = file_crc32(filename_a)\n crc_b = file_crc32(filename_b)\n return crc_a == crc_b", + "docstring": "Compare two files, returning True if they are the same, False otherwise. We check size first and return False quickly if the files are different sizes. If they are the same size, we continue to generating a crc for the whole file. You might wonder: why not use Python's instead? The answer is that the builtin library is not robust to the many different filesystems TensorFlow runs on, and so we here perform a similar comparison with the more robust FileIO. Args: filename_a: string path to the first file. filename_b: string path to the second file. Returns: True if the files are the same, False otherwise.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", + "ast_data": "FunctionDef name:filecmp arg:filename_a arg:filename_b arguments arg arg Assign Call Call Assign Call Call If Compare Return return:yes Assign Call Assign Call Return return:yes Compare" + }, + { + "library": "django", + "name": "serialize", + "source_code": "def serialize(self):\n return self.serialize_headers() + b'\\r\\n\\r\\n' + self.content", + "docstring": "Full HTTP message, including headers, as a bytestring.", + "type": "method", + "file_path": "django\\django\\http\\response.py", + "ast_data": "FunctionDef name:serialize arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__call__", + "source_code": "def __call__(self, x, y):\n x = np.asarray(x, dtype=np.float64)\n y = np.asarray(y, dtype=np.float64)\n if x.shape != y.shape:\n raise ValueError('x and y must be array-like with the same shape')\n indices = self._cpp_trifinder.find_many(x.ravel(), y.ravel()).reshape(x.shape)\n return indices", + "docstring": "Return an array containing the indices of the triangles in which the specified *x*, *y* points lie, or -1 for points that do not lie within a triangle. *x*, *y* are array-like x and y coordinates of the same shape and any number of dimensions. 
Returns integer array with the same shape and *x* and *y*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_trifinder.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:y arguments arg arg arg Assign Call Assign Call If Compare Raise Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "seaborn", + "name": "VariableType", + "source_code": "class VariableType(UserString):\n allowed = ('numeric', 'datetime', 'categorical')\n\n def __init__(self, data):\n assert data in self.allowed, data\n super().__init__(data)\n\n def __eq__(self, other):\n assert other in self.allowed, other\n return self.data == other", + "docstring": "Prevent comparisons elsewhere in the library from using the wrong name. Errors are simple assertions because users should not be able to trigger them. If that changes, they should be more verbose.", + "type": "class", + "file_path": "seaborn\\seaborn\\_base.py", + "ast_data": "ClassDef name:VariableType Assign FunctionDef name:__init__ arg:self arg:data arguments arg arg Compare Call Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Compare Return return:yes Compare" + }, + { + "library": "matplotlib", + "name": "spring", + "source_code": "def spring() -> None:\n set_cmap('spring')", + "docstring": "Set the colormap to 'spring'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:spring arguments Call" + }, + { + "library": "scipy", + "name": "num_obs_y", + "source_code": "def num_obs_y(Y):\n Y = _asarray(Y)\n is_valid_y(Y, throw=True, name='Y')\n k = Y.shape[0]\n if k == 0:\n raise ValueError('The number of observations cannot be determined on an empty distance matrix.')\n d = int(np.ceil(np.sqrt(k * 2)))\n if d * (d - 1) / 2 != k:\n raise ValueError('Invalid condensed distance matrix passed. Must be some k where k=(n choose 2) for some n >= 2.')\n return d", + "docstring": "Return the number of original observations that correspond to a condensed distance matrix. Parameters ---------- Y : array_like Condensed distance matrix. Returns ------- n : int The number of observations in the condensed distance matrix . Examples -------- Find the number of original observations corresponding to a condensed distance matrix Y. 
>>> from scipy.spatial.distance import num_obs_y >>> Y = [1, 2, 3.5, 7, 10, 4] >>> num_obs_y(Y) 4", + "type": "function", + "file_path": "scipy\\scipy\\spatial\\distance.py", + "ast_data": "FunctionDef name:num_obs_y arg:Y arguments arg Assign Call Call Assign If Compare Raise Call Assign Call Call Call If Compare Raise Call Return return:yes" + }, + { + "library": "django", + "name": "add_ordering", + "source_code": "def add_ordering(self, *ordering):\n errors = []\n for item in ordering:\n if isinstance(item, str):\n if item == '?':\n continue\n item = item.removeprefix('-')\n if item in self.annotations:\n continue\n if self.extra and item in self.extra:\n continue\n self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)\n elif not hasattr(item, 'resolve_expression'):\n errors.append(item)\n if getattr(item, 'contains_aggregate', False):\n raise FieldError('Using an aggregate in order_by() without also including it in annotate() is not allowed: %s' % item)\n if errors:\n raise FieldError('Invalid order_by arguments: %s' % errors)\n if ordering:\n self.order_by += ordering\n else:\n self.default_ordering = False", + "docstring": "Add items from the 'ordering' sequence to the query's \"order by\" clause. These items are either field names (not column names) -- possibly with a direction prefix ('-' or '?') -- or OrderBy expressions. If 'ordering' is empty, clear all ordering from the query.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\query.py", + "ast_data": "FunctionDef name:add_ordering arg:self arguments arg arg Assign For If Call If Compare Assign Call If Compare If BoolOp Compare Call Call If Call Call If Call Raise Call If Raise Call If Assign" + }, + { + "library": "tensorflow", + "name": "ismodule", + "source_code": "def ismodule(object):\n return _inspect.ismodule(tf_decorator.unwrap(object)[1])", + "docstring": "TFDecorator-aware replacement for inspect.ismodule.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py", + "ast_data": "FunctionDef name:ismodule arg:object arguments arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "write_metadata", + "source_code": "def write_metadata(self, handler: AppendableTable) -> None:\n if self.metadata is not None:\n handler.write_metadata(self.cname, self.metadata)", + "docstring": "set the meta data", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:write_metadata arg:self arg:handler arguments arg arg If Compare Call" + }, + { + "library": "pandas", + "name": "skew", + "source_code": "@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='skew')\ndef skew(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Series | Any:\n result = super().skew(axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs)\n if isinstance(result, Series):\n result = result.__finalize__(self, method='skew')\n return result", + "docstring": "Return unbiased skew over requested axis. Normalized by N-1. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. For this parameter is unused and defaults to 0. For DataFrames, specifying `numeric_onlyTrue` to avoid getting an error. >>> df = pd.DataFrame( ... {\"a\": [1, 2, 3], \"b\": [\"T\", \"Z\", \"X\"]}, index=[\"tiger\", \"zebra\", \"cow\"] ... 
) >>> df.skew(numeric_only=True) a 0.0 dtype: float64", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:skew arg:self arg:axis arg:skipna arg:numeric_only arguments arg arg arg arg arg Assign Call Call If Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "OnSessionInitResponse", + "source_code": "class OnSessionInitResponse:\n\n def __init__(self, action):\n _check_type(action, str)\n self.action = action", + "docstring": "Response from an on-session-init callback.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", + "ast_data": "ClassDef name:OnSessionInitResponse FunctionDef name:__init__ arg:self arg:action arguments arg arg Call Assign" + }, + { + "library": "scikit-learn", + "name": "capabilities", + "source_code": "def capabilities(self):\n return {'boolean indexing': True, 'data-dependent shapes': True, 'max dimensions': 64}", + "docstring": "Return a dictionary of array API library capabilities. The resulting dictionary has the following keys: - **\"boolean indexing\"**: boolean indicating whether an array library supports boolean indexing. Always `` for NumPy. See for more details. See Also -------- __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- capabilities : dict A dictionary of array API library capabilities. Examples -------- >>> info = np.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, 'data-dependent shapes': True, 'max dimensions': 64}", + "type": "method", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\numpy\\_info.py", + "ast_data": "FunctionDef name:capabilities arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "update_home_views", + "source_code": "def update_home_views(self, figure=None):\n if not figure:\n figure = self.figure\n for a in figure.get_axes():\n if a not in self.home_views[figure]:\n self.home_views[figure][a] = a._get_view()", + "docstring": "Make sure that `` has an entry for all Axes present in the figure.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "FunctionDef name:update_home_views arg:self arg:figure arguments arg arg If Assign For Call If Compare Assign Call" + }, + { + "library": "tensorflow", + "name": "assert_non_negative_v2", + "source_code": "@tf_export('debugging.assert_non_negative', v1=[])\n@dispatch.add_dispatch_support\ndef assert_non_negative_v2(x, message=None, summarize=None, name=None):\n return assert_non_negative(x=x, summarize=summarize, message=message, name=name)", + "docstring": "Assert the condition holds element-wise. This Op checks that holds for every element of . If is empty, this is trivially satisfied. If is not >= 0 everywhere, , as well as the first entries of are printed, and is raised. Args: x: Numeric . message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to \"assert_non_negative\". Returns: Op raising unless is all non-negative. This can be used with inside of s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and is False. 
The check can be performed immediately during eager execution or if is statically known.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py", + "ast_data": "FunctionDef name:assert_non_negative_v2 arg:x arg:message arg:summarize arg:name arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_dequantize_affine", + "source_code": "@register_custom_op\ndef _dequantize_affine(input: torch.Tensor, block_size: list[int], scale: torch.Tensor, zero_point: Optional[torch.Tensor], input_dtype: torch.dtype, quant_min: Optional[Union[int, float, bool]]=None, quant_max: Optional[Union[int, float, bool]]=None, zero_point_domain: Optional[str]=ZeroPointDomain.INT.name, output_dtype: torch.dtype=torch.float32) -> torch.Tensor:\n if input_dtype not in _SUB_BYTE_UINT_BOUNDS:\n assert input.dtype == input_dtype, f'Expected: {input_dtype}, got: {input.dtype}'\n assert output_dtype in [torch.float32, torch.float16, torch.bfloat16], f'Unsupported output dtype: {output_dtype}'\n quant_min, quant_max = _get_and_check_qmin_qmax(input_dtype, quant_min, quant_max)\n return _dequantize_affine_no_dtype_check(input, block_size, scale, zero_point, quant_min, quant_max, zero_point_domain, output_dtype)", + "docstring": "op definition that has compatible signatures with custom op library", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_affine_quantization.py", + "ast_data": "FunctionDef name:_dequantize_affine arg:input arg:block_size arg:scale arg:zero_point arg:input_dtype arg:quant_min arg:quant_max arg:zero_point_domain arg:output_dtype arguments arg arg arg arg arg arg arg arg arg If Compare Compare Compare Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_verify_tf_condition", + "source_code": "def _verify_tf_condition(cond, tag):\n extra_hint = 'to check for None, use `is not None`'\n cond = tensor_conversion.convert_to_tensor_v2(cond)\n if cond.dtype != dtypes.bool:\n raise ValueError('condition of {} expected to be `tf.bool` scalar, got {}; to use as boolean Tensor, use `tf.cast`; {}'.format(tag, cond, extra_hint))\n if cond.shape is None or cond.shape.ndims is None:\n cond = array_ops.reshape(cond, ())\n elif cond.shape.ndims > 0:\n known_dims = [d for d in cond.shape.as_list() if d is not None]\n if np.prod(known_dims) > 1:\n raise ValueError('condition of {} expected to be `tf.bool` scalar, got {}; {}'.format(tag, cond, extra_hint))\n else:\n cond = array_ops.reshape(cond, ())\n return cond", + "docstring": "Ensures that the condition can be used in a TF control flow.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py", + "ast_data": "FunctionDef name:_verify_tf_condition arg:cond arg:tag arguments arg arg Assign Assign Call If Compare Raise Call Call If BoolOp Compare Compare Assign Call If Compare Assign Call Compare If Compare Call Raise Call Call Assign Call Return return:yes" + }, + { + "library": "scrapy", + "name": "inspect_response", + "source_code": "def inspect_response(response: Response, spider: Spider) -> None:\n sigint_handler = signal.getsignal(signal.SIGINT)\n Shell(spider.crawler).start(response=response, spider=spider)\n signal.signal(signal.SIGINT, sigint_handler)", + "docstring": "Open a shell to inspect the given response", + "type": "function", + "file_path": "scrapy\\scrapy\\shell.py", + "ast_data": "FunctionDef name:inspect_response arg:response arg:spider arguments arg arg Assign 
Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "score", + "source_code": "@available_if(_final_estimator_has('score'))\ndef score(self, X, y=None, sample_weight=None, **params):\n with _raise_or_warn_if_not_fitted(self):\n Xt = X\n if not _routing_enabled():\n for _, name, transform in self._iter(with_final=False):\n Xt = transform.transform(Xt)\n score_params = {}\n if sample_weight is not None:\n score_params['sample_weight'] = sample_weight\n return self.steps[-1][1].score(Xt, y, **score_params)\n routed_params = process_routing(self, 'score', sample_weight=sample_weight, **params)\n Xt = X\n for _, name, transform in self._iter(with_final=False):\n Xt = transform.transform(Xt, **routed_params[name].transform)\n return self.steps[-1][1].score(Xt, y, **routed_params[self.steps[-1][0]].score)", + "docstring": "Transform the data, and apply with the final estimator. Call of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls method. Only valid if the final estimator implements . Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Targets used for scoring. Must fulfill label requirements for all steps of the pipeline. sample_weight : array-like, default=None If not None, this argument is passed as `enable_metadata_routing=TrueMetadata Routing User Guide score` on the final estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg arg With Call Assign If Call For Call Assign Call Assign If Compare Assign Return return:yes Call Assign Call Assign For Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "get_resample", + "source_code": "def get_resample(self):\n return self._resample", + "docstring": "Return whether image resampling is used.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:get_resample arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "bf16_compress_hook", + "source_code": "def bf16_compress_hook(state: LowPrecisionState, grad: torch.Tensor, output: Optional[torch.Tensor]=None):\n bf16_hook = functools.partial(_low_precision_hook, torch.bfloat16)\n return bf16_hook(state, grad, output)", + "docstring": "Implement FSDP communication hook for a simple gradient compression approach . 
Casts ``.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\algorithms\\_comm_hooks\\default_hooks.py", + "ast_data": "FunctionDef name:bf16_compress_hook arg:state arg:grad arg:output arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "__new__", + "source_code": "def __new__(cls, *system, **kwargs):\n if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):\n return system[0].to_tf()\n if cls is TransferFunction:\n if kwargs.get('dt') is None:\n return TransferFunctionContinuous.__new__(TransferFunctionContinuous, *system, **kwargs)\n else:\n return TransferFunctionDiscrete.__new__(TransferFunctionDiscrete, *system, **kwargs)\n return super().__new__(cls)", + "docstring": "Handle object conversion if input is an instance of lti.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:__new__ arg:cls arguments arg arg arg If BoolOp Compare Call Call Return return:yes Call If Compare If Compare Call Return return:yes Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, offset=(0, 0), spacing=10.0, angle=45.0, length=np.sqrt(2), **kwargs):\n super().__init__(offset)\n self._spacing = spacing\n self._angle = angle\n self._length = length\n self._gc = kwargs", + "docstring": "Parameters ---------- offset : (float, float), default: (0, 0) The (x, y) offset to apply to the path, in points. spacing : float, default: 10.0 The spacing between ticks in points. angle : float, default: 45.0 The angle between the path and the tick in degrees. The angle is measured as if you were an ant walking along the curve, with zero degrees pointing directly ahead, 90 to your left, -90 to your right, and 180 behind you. To change side of the ticks, change sign of the angle. length : float, default: 1.414 The length of the tick relative to spacing. Recommended length = 1.414 (sqrt(2)) when angle=45, length=1.0 when angle=90 and length=2.0 when angle=60. **kwargs Extra keywords are stored and passed through to :meth:. Examples -------- See :doc:.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:offset arg:spacing arg:angle arg:length arguments arg arg arg arg arg arg Call Call Call Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "_build_recursive_hd_gather", + "source_code": "def _build_recursive_hd_gather(input_tensors, devices, red_op):\n num_devices = len(devices)\n num_hops = int(math.log(num_devices, 2))\n if num_devices != 2 ** num_hops:\n raise ValueError('num_devices must be a power of 2')\n chunks = input_tensors\n for h in range(0, num_hops):\n span = 2 ** h\n group_size = span * 2\n new_chunks = [[] for _ in devices]\n for d in range(0, num_devices):\n if d % group_size >= group_size / 2:\n continue\n left_dev = devices[d]\n right_dev = devices[d + span]\n left_split = array_ops.split(chunks[d], 2)\n right_split = array_ops.split(chunks[d + span], 2)\n with ops.device(left_dev):\n new_chunks[d] = red_op(left_split[0], right_split[0])\n with ops.device(right_dev):\n new_chunks[d + span] = red_op(left_split[1], right_split[1])\n chunks = new_chunks\n return chunks", + "docstring": "Construct the gather phase of recursive halving-doubling all-reduce. Args: input_tensors: list of to be elementwise reduced. 
devices: a list of strings naming the devices hosting input_tensors, which will also be used to host the (partial) reduction values. red_op: a binary elementwise reduction Op. Returns: list of which are the fully reduced tensor shards. Raises: ValueError: num_devices not a power of 2, or tensor len not divisible by 2 the proper number of times.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", + "ast_data": "FunctionDef name:_build_recursive_hd_gather arg:input_tensors arg:devices arg:red_op arguments arg arg arg Assign Call Assign Call Call If Compare Raise Call Assign For Call Assign Assign Assign For Call If Compare Assign Assign Assign Call Assign Call With Call Assign Call With Call Assign Call Assign Return return:yes" + }, + { + "library": "kornia", + "name": "adjust_saturation_raw", + "source_code": "def adjust_saturation_raw(image: Tensor, factor: Union[float, Tensor]) -> Tensor:\n KORNIA_CHECK_IS_TENSOR(image, 'Expected shape (*, H, W)')\n KORNIA_CHECK(isinstance(factor, (float, Tensor)), 'Factor should be float or Tensor.')\n if isinstance(factor, float):\n factor = torch.as_tensor(factor, device=image.device, dtype=image.dtype)\n elif isinstance(factor, Tensor):\n factor = factor.to(image.device, image.dtype)\n while len(factor.shape) != len(image.shape):\n factor = factor[..., None]\n h, s, v = torch.chunk(image, chunks=3, dim=-3)\n s_out: Tensor = torch.clamp(s * factor, min=0, max=1)\n out: Tensor = torch.cat([h, s_out, v], dim=-3)\n return out", + "docstring": "Adjust color saturation of an image. Expecting image to be in hsv format already.", + "type": "function", + "file_path": "kornia\\kornia\\enhance\\adjust.py", + "ast_data": "FunctionDef name:adjust_saturation_raw arg:image arg:factor arguments arg arg Call Call Call If Call Assign Call If Call Assign Call While Compare Call Call Assign Assign Call Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "__add__", + "source_code": "def __add__(self, right: 'Quaternion') -> 'Quaternion':\n KORNIA_CHECK_TYPE(right, Quaternion)\n return Quaternion(self.data + right.data)", + "docstring": "Add a given quaternion. Args: right: the quaternion to add. Example: >>> q1 = Quaternion.identity() >>> q2 = Quaternion(tensor([2., 0., 1., 1.])) >>> q3 = q1 + q2 >>> q3.data Parameter containing: tensor([3., 0., 1., 1.], requires_grad=True)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\quaternion.py", + "ast_data": "FunctionDef name:__add__ arg:self arg:right arguments arg arg Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "add_default_options", + "source_code": "def add_default_options(parser: argparse.ArgumentParser) -> None:\n parser.add_argument('--retries', type=int, default=3, help='number of times to retry if the linter times out.')\n parser.add_argument('--verbose', action='store_true', help='verbose logging')\n parser.add_argument('filenames', nargs='+', help='paths to lint')", + "docstring": "Add default options to a parser. 
This should be called the last in the chain of add_argument calls.", + "type": "function", + "file_path": "pytorch\\tools\\linter\\adapters\\ruff_linter.py", + "ast_data": "FunctionDef name:add_default_options arg:parser arguments arg Call Call Call" + }, + { + "library": "authlib", + "name": "find_by_kid", + "source_code": "def find_by_kid(self, kid):\n if kid is None and len(self.keys) == 1:\n return self.keys[0]\n for k in self.keys:\n if k.kid == kid:\n return k\n raise ValueError('Invalid JSON Web Key Set')", + "docstring": "Find the key matches the given kid value. :param kid: A string of kid :return: Key instance :raise: ValueError", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7517\\key_set.py", + "ast_data": "FunctionDef name:find_by_kid arg:self arg:kid arguments arg arg If BoolOp Compare Compare Call Return return:yes For If Compare Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "ones_like", + "source_code": "def ones_like(t):\n if t.dtype == dtypes.resource:\n return array_ops.ones(*shape_and_dtype(t))\n else:\n return array_ops.ones_like(t)", + "docstring": "Like array_ops.ones_like, but respects resource handles.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\default_gradient.py", + "ast_data": "FunctionDef name:ones_like arg:t arguments arg If Compare Return return:yes Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "all_variables", + "source_code": "@tf_export(v1=['all_variables'])\n@deprecated('2017-03-02', 'Please use tf.global_variables instead.')\ndef all_variables():\n return global_variables()", + "docstring": "Use instead.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:all_variables arguments Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "load_config", + "source_code": "def load_config(self, maybe_pickled_config: Union[bytes, dict[str, Any]]) -> None:\n if not isinstance(maybe_pickled_config, dict):\n config = pickle.loads(maybe_pickled_config)\n else:\n config = maybe_pickled_config\n for k, v in config.items():\n if k in self._config:\n setattr(self, k, v)\n else:\n from torch._dynamo.utils import warn_once\n warn_once(f'key {k} with value {v} is not understood by this config')", + "docstring": "Restore from a prior call to save_config() or shallow_copy_dict()", + "type": "method", + "file_path": "pytorch\\torch\\utils\\_config_module.py", + "ast_data": "FunctionDef name:load_config arg:self arg:maybe_pickled_config arguments arg arg If Call Assign Call Assign For Call If Compare Call Call" + }, + { + "library": "django", + "name": "value_to_string", + "source_code": "def value_to_string(self, obj):\n return b64encode(self.value_from_object(obj)).decode('ascii')", + "docstring": "Binary data is serialized as base64", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "FunctionDef name:value_to_string arg:self arg:obj arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_split_cluster_for_evaluator", + "source_code": "def _split_cluster_for_evaluator(cluster_spec, task_type):\n new_cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()\n if task_type == _TaskType.EVALUATOR:\n assert _TaskType.EVALUATOR in new_cluster_spec\n new_cluster_spec = {_TaskType.EVALUATOR: new_cluster_spec[_TaskType.EVALUATOR]}\n else:\n new_cluster_spec.pop(_TaskType.EVALUATOR, 
None)\n return normalize_cluster_spec(new_cluster_spec)", + "docstring": "Split the cluster for evaluator since it needn't talk to other tasks.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py", + "ast_data": "FunctionDef name:_split_cluster_for_evaluator arg:cluster_spec arg:task_type arguments arg arg Assign Call Call If Compare Compare Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "maybe_infer_ndim", + "source_code": "def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int:\n if ndim is None:\n if not isinstance(values.dtype, np.dtype):\n if len(placement) != 1:\n ndim = 1\n else:\n ndim = 2\n else:\n ndim = values.ndim\n return ndim", + "docstring": "If is not provided, infer it from placement and values.", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\api.py", + "ast_data": "FunctionDef name:maybe_infer_ndim arg:values arg:placement arg:ndim arguments arg arg arg If Compare If Call If Compare Call Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_zero_state_tensors", + "source_code": "def _zero_state_tensors(state_size, batch_size, dtype):\n\n def get_state_shape(s):\n c = _concat(batch_size, s)\n size = array_ops.zeros(c, dtype=dtype)\n if not context.executing_eagerly():\n c_static = _concat(batch_size, s, static=True)\n size.set_shape(c_static)\n return size\n return nest.map_structure(get_state_shape, state_size)", + "docstring": "Create tensors of zeros based on state_size, batch_size, and dtype.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn_cell_impl.py", + "ast_data": "FunctionDef name:_zero_state_tensors arg:state_size arg:batch_size arg:dtype arguments arg arg arg FunctionDef name:get_state_shape arg:s arguments arg Assign Call Assign Call If Call Assign Call Call Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "log_json", + "source_code": "def log_json(self, stats: Any) -> None:\n print(stats)", + "docstring": "Logs the stats in json format to stdout.", + "type": "method", + "file_path": "pytorch\\tools\\stats\\monitor.py", + "ast_data": "FunctionDef name:log_json arg:self arg:stats arguments arg arg Call" + }, + { + "library": "matplotlib", + "name": "score_family", + "source_code": "def score_family(self, families, family2):\n if not isinstance(families, (list, tuple)):\n families = [families]\n elif len(families) == 0:\n return 1.0\n family2 = family2.lower()\n step = 1 / len(families)\n for i, family1 in enumerate(families):\n family1 = family1.lower()\n if family1 in font_family_aliases:\n options = [*map(str.lower, self._expand_aliases(family1))]\n if family2 in options:\n idx = options.index(family2)\n return (i + idx / len(options)) * step\n elif family1 == family2:\n return i * step\n return 1.0", + "docstring": "Return a match score between the list of font families in *families* and the font family name *family2*. An exact match at the head of the list returns 0.0. A match further down the list will return between 0 and 1. 
No match will return 1.0.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py", + "ast_data": "FunctionDef name:score_family arg:self arg:families arg:family2 arguments arg arg arg If Call Assign If Compare Call Return return:yes Assign Call Assign Call For Call Assign Call If Compare Assign Call Call If Compare Assign Call Return return:yes Call If Compare Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "read_merge_rules", + "source_code": "def read_merge_rules(repo: Optional[GitRepo], org: str, project: str) -> list[MergeRule]:\n repo_relative_rules_path = MERGE_RULE_PATH\n if repo is None:\n json_data = gh_fetch_url(f'https://api.github.com/repos/{org}/{project}/contents/{repo_relative_rules_path}', headers={'Accept': 'application/vnd.github.v3+json'}, reader=json.load)\n content = base64.b64decode(json_data['content'])\n return [MergeRule(**x) for x in yaml.safe_load(content)]\n else:\n rules_path = Path(repo.repo_dir) / repo_relative_rules_path\n if not rules_path.exists():\n print(f'{rules_path} does not exist, returning empty rules')\n return []\n with open(rules_path) as fp:\n rc = yaml.safe_load(fp)\n return [MergeRule(**x) for x in rc]", + "docstring": "Returns the list of all merge rules for the repo or project. NB: this function is used in Meta-internal workflows, see the comment at the top of this file for details.", + "type": "function", + "file_path": "pytorch\\.github\\scripts\\trymerge.py", + "ast_data": "FunctionDef name:read_merge_rules arg:repo arg:org arg:project arguments arg arg arg Assign If Compare Assign Call Assign Call Return return:yes Call Call Assign Call If Call Call Return return:no With Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_tpu_host_device_name", + "source_code": "def _tpu_host_device_name(job, task):\n if job is None:\n return '/task:%d/device:CPU:0' % task\n else:\n return '/job:%s/task:%d/device:CPU:0' % (job, task)", + "docstring": "Returns the device name for the CPU device on of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py", + "ast_data": "FunctionDef name:_tpu_host_device_name arg:job arg:task arguments arg arg If Compare Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "add_scripts", + "source_code": "def add_scripts(self, *files):\n scripts = self.paths(files)\n dist = self.get_distribution()\n if dist is not None:\n if dist.scripts is None:\n dist.scripts = []\n dist.scripts.extend(scripts)\n else:\n self.scripts.extend(scripts)", + "docstring": "Add scripts to configuration. Add the sequence of files to the beginning of the scripts list. 
Scripts will be installed under the /bin/ directory.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\misc_util.py", + "ast_data": "FunctionDef name:add_scripts arg:self arguments arg arg Assign Call Assign Call If Compare If Compare Assign Call Call" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self, input, chunkSize=65536):\n self.input = input\n self.chunkSize = chunkSize", + "docstring": "Initialize file_generator with file `` for chunked access.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\__init__.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:input arg:chunkSize arguments arg arg arg Assign Assign" + }, + { + "library": "django", + "name": "DayArchiveView", + "source_code": "class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):\n template_name_suffix = '_archive_day'", + "docstring": "List of objects published on a given day.", + "type": "class", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "ClassDef name:DayArchiveView Assign" + }, + { + "library": "scipy", + "name": "skewness", + "source_code": "@abstractmethod\ndef skewness(self, *, method):\n raise NotImplementedError()", + "docstring": "Skewness (standardized third moment) Parameters ---------- method : {None, 'formula', 'general', 'transform', 'normalize', 'cache'} Method used to calculate the standardized third moment. Not all methods are available for all distributions. See for details. See Also -------- moment mean variance References ---------- .. [1] Skewness, *Wikipedia*, Examples -------- Instantiate a distribution with the desired parameters: >>> from scipy import stats >>> X = stats.Normal(mu=1., sigma=2.) Evaluate the skewness: >>> X.skewness() 0.0 >>> X.skewness() == X.moment(order=3, kind='standardized') True", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_probability_distribution.py", + "ast_data": "FunctionDef name:skewness arg:self arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "is_optional_type", + "source_code": "def is_optional_type(type_hint) -> bool:\n origin = get_origin(type_hint)\n if origin is Union:\n args = get_args(type_hint)\n return type(None) in args\n return False", + "docstring": "Special case of is_type.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fuzzer.py", + "ast_data": "FunctionDef name:is_optional_type arg:type_hint arguments arg Assign Call If Compare Assign Call Return return:yes Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_shape", + "source_code": "def set_shape(self, shape):\n if not isinstance(shape, tensor_shape.TensorShape):\n shape = tensor_shape.TensorShape(shape)\n if shape.dims is not None:\n dim_list = [dim.value for dim in shape.dims]\n for dim in range(len(dim_list)):\n if dim_list[dim] is None and self.shape.dims is not None:\n dim_list[dim] = self.shape.dims[dim]\n shape = tensor_shape.TensorShape(dim_list)\n if not self.shape.is_compatible_with(shape):\n raise ValueError(\"Keras symbolic input/output's shape %s is notcompatible with supplied shape %s\" % (self.shape, shape))\n else:\n self._type_spec._shape = shape", + "docstring": "Updates the shape of this KerasTensor. 
Mimics .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py", + "ast_data": "FunctionDef name:set_shape arg:self arg:shape arguments arg arg If Call Assign Call If Compare Assign For Call Call If BoolOp Compare Compare Assign Assign Call If Call Raise Call Assign" + }, + { + "library": "pytorch", + "name": "from_tensors", + "source_code": "@classmethod\ndef from_tensors(cls, name: str, expected: torch.Tensor | float | int | bool, actual: torch.Tensor | float | int | bool) -> VerificationInfo:\n if not isinstance(expected, torch.Tensor):\n expected = torch.tensor(expected)\n if not isinstance(actual, torch.Tensor):\n actual = torch.tensor(actual)\n max_abs_diff, max_rel_diff, abs_diff, rel_diff = _compare_tensors(expected, actual)\n bins = torch.tensor([0.0, 1e-06, 1e-05, 0.0001, 0.001, 0.01, 0.1, 1.0, 10, 1000000], dtype=torch.float)\n abs_diff_hist = torch.histogram(abs_diff.float(), bins=bins)\n rel_diff_hist = torch.histogram(rel_diff.float(), bins=bins)\n return cls(name=name, max_abs_diff=max_abs_diff, max_rel_diff=max_rel_diff, abs_diff_hist=abs_diff_hist, rel_diff_hist=rel_diff_hist, expected_dtype=expected.dtype, actual_dtype=actual.dtype)", + "docstring": "Create a VerificationInfo object from two tensors. Args: name: The name of the value. expected: The expected tensor. actual: The actual tensor. Returns: VerificationInfo: The VerificationInfo object.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_verification.py", + "ast_data": "FunctionDef name:from_tensors arg:cls arg:name arg:expected arg:actual arguments arg arg arg arg If Call Assign Call If Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "convert_to_unspecialized", + "source_code": "def convert_to_unspecialized(self, tx):\n mod = tx.output.get_submodule(self.module_key)\n GenerationTracker.tag(mod)\n if tx.f_code.co_name != '__init__':\n GenerationTracker.mark_class_dynamic(type(mod))\n raise UnspecializeRestartAnalysis", + "docstring": "Restart analysis treating this module as an UnspecializedNNModuleVariable", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\variables\\nn_module.py", + "ast_data": "FunctionDef name:convert_to_unspecialized arg:self arg:tx arguments arg arg Assign Call Call If Compare Call Call Raise" + }, + { + "library": "tensorflow", + "name": "verify_captures", + "source_code": "def verify_captures(op_type, branch_graphs):\n other_branch_graphs = {g: i for i, g in enumerate(branch_graphs)}\n for i, branch_graph in enumerate(branch_graphs):\n for t in branch_graph.external_captures:\n if not isinstance(t, ops.EagerTensor) and t.graph in other_branch_graphs:\n branch_names = ['true_fn', 'false_fn'] if op_type == _COND else ['branch {}'.format(bi) for bi in range(len(branch_graphs))]\n raise ValueError('Tensor {tname} in {b0name} is accessed from {b1name}.'.format(tname=t.name, b0name=branch_names[other_branch_graphs[t.graph]], b1name=branch_names[i]))", + "docstring": "Verify that a branch's tensor is not accessed in another branch fn.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py", + "ast_data": "FunctionDef name:verify_captures arg:op_type arg:branch_graphs arguments arg arg Assign Call For Call For If BoolOp Call Compare Assign Compare Call Call Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "write_outputs", + "source_code": "def write_outputs(self, 
variable_name: str, filename: str | Path) -> None:\n content = '\\n'.join(('set(', variable_name, *(f' \"{file.as_posix()}\"' for file in sorted(self.files)), ')'))\n self._write_if_changed(filename, content)", + "docstring": "Write a file containing the list of all outputs which are generated by this script.", + "type": "method", + "file_path": "pytorch\\torchgen\\utils.py", + "ast_data": "FunctionDef name:write_outputs arg:self arg:variable_name arg:filename arguments arg arg arg Assign Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "get_capstyle", + "source_code": "@_docstring.interpd\ndef get_capstyle(self):\n return self._capstyle.name if self._capstyle else None", + "docstring": "Return the cap style for the collection (for all its elements). Returns ------- %(CapStyle)s or None", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:get_capstyle arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "to_dense", + "source_code": "def to_dense(self, name='to_dense'):\n with self._name_scope(name):\n return self._to_dense()", + "docstring": "Return a dense (batch) matrix representing this operator.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:to_dense arg:self arg:name arguments arg arg With Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "strings_with_wrong_placed_whitespace", + "source_code": "def strings_with_wrong_placed_whitespace(file_obj: IO[str]) -> Iterable[tuple[int, str]]:\n\n def has_wrong_whitespace(first_line: str, second_line: str) -> bool:\n if first_line.endswith('\\\\n'):\n return False\n elif first_line.startswith(' ') or second_line.startswith(' '):\n return False\n elif first_line.endswith(' ') or second_line.endswith(' '):\n return False\n elif not first_line.endswith(' ') and second_line.startswith(' '):\n return True\n return False\n tokens: list = list(tokenize.generate_tokens(file_obj.readline))\n for first_token, second_token, third_token in zip(tokens, tokens[1:], tokens[2:]):\n if first_token.type == third_token.type == token.STRING and second_token.type == token.NL:\n first_string: str = first_token.string[_get_literal_string_prefix_len(first_token.string) + 1:-1]\n second_string: str = third_token.string[_get_literal_string_prefix_len(third_token.string) + 1:-1]\n if has_wrong_whitespace(first_string, second_string):\n yield (third_token.start[0], 'String has a space at the beginning instead of the end of the previous string.')", + "docstring": "Test case for leading spaces in concated strings. For example: >>> rule = ( ... \"We want the space at the end of the line, \" ... \"not at the beginning\" ... ) Instead of: >>> rule = ( ... \"We want the space at the end of the line,\" ... \" not at the beginning\" ... ) Parameters ---------- file_obj : IO File-like object containing the Python code to validate. Yields ------ line_number : int Line number of unconcatenated string. 
msg : str Explanation of the error.", + "type": "function", + "file_path": "pandas\\scripts\\validate_unwanted_patterns.py", + "ast_data": "FunctionDef name:strings_with_wrong_placed_whitespace arg:file_obj arguments arg FunctionDef name:has_wrong_whitespace arg:first_line arg:second_line arguments arg arg If Call Return return:yes If BoolOp Call Call Return return:yes If BoolOp Call Call Return return:yes If BoolOp Call Call Return return:yes Return return:yes Call Call For Call If BoolOp Compare Compare Call Call If Call" + }, + { + "library": "tensorflow", + "name": "_check_archive_signature", + "source_code": "def _check_archive_signature(archive_file: io.BufferedIOBase) -> None:\n signature = archive_file.read(8)\n if signature != b'!\\n':\n raise RuntimeError('Invalid archive file format.')", + "docstring": "Checks if the file has the correct archive header signature. The cursor is moved to the first available file header section after successfully checking the signature. Args: archive_file: The archive file object pointing at its beginning. Raises: RuntimeError: The archive signature is invalid.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\ios\\extract_object_files.py", + "ast_data": "FunctionDef name:_check_archive_signature arg:archive_file arguments arg Assign Call If Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "flat_values_spec", + "source_code": "@property\ndef flat_values_spec(self):\n return self._flat_values_spec", + "docstring": "The of the flat_values of RaggedTensor. Returns: - The TypeSpec of flat_values. - None when the flat_values is a Tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", + "ast_data": "FunctionDef name:flat_values_spec arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "MethodDispatcher", + "source_code": "class MethodDispatcher(Dispatcher):\n __slots__ = ('obj', 'cls')\n\n @classmethod\n def get_func_params(cls, func):\n if hasattr(inspect, 'signature'):\n sig = inspect.signature(func)\n return itl.islice(sig.parameters.values(), 1, None)\n\n def __get__(self, instance, owner):\n self.obj = instance\n self.cls = owner\n return self\n\n def __call__(self, *args, **kwargs):\n types = tuple([type(arg) for arg in args])\n func = self.dispatch(*types)\n if not func:\n raise NotImplementedError(f'Could not find signature for {self.name}: <{str_signature(types)}>')\n return func(self.obj, *args, **kwargs)", + "docstring": "Dispatch methods based on type signature See Also: Dispatcher", + "type": "class", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py", + "ast_data": "ClassDef name:MethodDispatcher Assign FunctionDef name:get_func_params arg:cls arg:func arguments arg arg If Call Assign Call Return return:yes Call Call FunctionDef name:__get__ arg:self arg:instance arg:owner arguments arg arg arg Assign Assign Return return:yes FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call Call Assign Call If Raise Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_use_sharded_flat_param", + "source_code": "def _use_sharded_flat_param(self) -> None:\n flat_param = self.flat_param\n if self._use_orig_params:\n in_forward = self._training_state == HandleTrainingState.FORWARD\n skip_use_sharded_views = torch.is_grad_enabled() and in_forward and (self._sharding_strategy in NO_RESHARD_AFTER_FORWARD_HANDLE_STRATEGIES)\n if skip_use_sharded_views:\n 
unsharded_flat_param = flat_param.data\n if self._offload_params:\n device = flat_param._local_shard.device\n _p_assert(device == torch.device('cpu'), f'Expects the local shard to be on CPU but got {device}')\n flat_param.data = flat_param._local_shard\n if self._use_orig_params:\n if skip_use_sharded_views:\n self._unsharded_flat_param_for_skipped_views = unsharded_flat_param\n else:\n self._use_sharded_views()\n if in_forward and (not self._skipped_use_sharded_views):\n accumulated_grad_in_no_sync = flat_param.grad is not None and self.uses_sharded_strategy and (flat_param.grad.shape == flat_param._unpadded_unsharded_size)\n if accumulated_grad_in_no_sync:\n self._use_unsharded_grad_views()\n else:\n self._use_sharded_grad_views()", + "docstring": "Switches to using the sharded flat parameter.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", + "ast_data": "FunctionDef name:_use_sharded_flat_param arg:self arguments arg Assign If Assign Compare Assign BoolOp Call Compare If Assign If Assign Call Compare Call Assign If If Assign Call If BoolOp Assign BoolOp Compare Compare If Call Call" + }, + { + "library": "tensorflow", + "name": "commands", + "source_code": "def commands(self) -> List[List[str]]:\n cmds = []\n cmds.extend(self.extra_setup_commands)\n macos_build = self.type_ == BuildType.XLA_MACOS_X86_CPU_KOKORO or self.type_ == BuildType.XLA_MACOS_ARM64_CPU_KOKORO\n if not macos_build:\n cmds.append(retry(self.bazel_command(subcommand='build', extra_options=('--nobuild',))))\n cmds.append(self.bazel_command(subcommand=self.subcommand))\n cmds.append(['bazel', 'analyze-profile', 'profile.json.gz'])\n return cmds", + "docstring": "Returns list of commands for a build.", + "type": "method", + "file_path": "tensorflow\\third_party\\xla\\build_tools\\ci\\build.py", + "ast_data": "FunctionDef name:commands arg:self arguments arg Assign Call Assign BoolOp Compare Compare If Call Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_weighted_categorical_column", + "source_code": "def _weighted_categorical_column(categorical_column, weight_feature_key, dtype=dtypes.float32):\n if dtype is None or not (dtype.is_integer or dtype.is_floating):\n raise ValueError('dtype {} is not convertible to float.'.format(dtype))\n return _WeightedCategoricalColumn(categorical_column=categorical_column, weight_feature_key=weight_feature_key, dtype=dtype)", + "docstring": "Applies weight values to a . Use this when each of your sparse inputs has both an ID and a value. For example, if you're representing text documents as a collection of word frequencies, you can provide 2 parallel sparse input features ('terms' and 'frequencies' below). Example: Input objects: This assumes the input dictionary contains a for key 'terms', and a for key 'frequencies'. These 2 tensors must have the same indices and dense shape. Args: categorical_column: A created by functions. weight_feature_key: String key for weight values. dtype: Type of weights, such as . Only float and integer weights are supported. Returns: A composed of two sparse features: one represents id, the other represents weight (value) of the id feature in that example. 
Raises: ValueError: if is not convertible to float.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_weighted_categorical_column arg:categorical_column arg:weight_feature_key arg:dtype arguments arg arg arg If BoolOp Compare BoolOp Raise Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_L2LossGrad", + "source_code": "@ops.RegisterGradient('L2Loss')\ndef _L2LossGrad(op: ops.Operation, grad):\n return op.inputs[0] * grad", + "docstring": "Return the gradients for L2Loss. Args: op: The L2LossOp for which we need to generate gradients. grad: Tensor containing a single number. Returns: The gradient, which is (x * grad).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py", + "ast_data": "FunctionDef name:_L2LossGrad arg:op arg:grad arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "_eps_for_method", + "source_code": "@functools.lru_cache\ndef _eps_for_method(x0_dtype, f0_dtype, method):\n EPS = np.finfo(np.float64).eps\n x0_is_fp = False\n if np.issubdtype(x0_dtype, np.inexact):\n EPS = np.finfo(x0_dtype).eps\n x0_itemsize = np.dtype(x0_dtype).itemsize\n x0_is_fp = True\n if np.issubdtype(f0_dtype, np.inexact):\n f0_itemsize = np.dtype(f0_dtype).itemsize\n if x0_is_fp and f0_itemsize < x0_itemsize:\n EPS = np.finfo(f0_dtype).eps\n if method in ['2-point', 'cs']:\n return EPS ** 0.5\n elif method in ['3-point']:\n return EPS ** (1 / 3)\n else:\n raise RuntimeError(\"Unknown step method, should be one of {'2-point', '3-point', 'cs'}\")", + "docstring": "Calculates relative EPS step to use for a given data type and numdiff step method. Progressively smaller steps are used for larger floating point types. Parameters ---------- f0_dtype: np.dtype dtype of function evaluation x0_dtype: np.dtype dtype of parameter vector method: {'2-point', '3-point', 'cs'} Returns ------- EPS: float relative step size. May be np.float16, np.float32, np.float64 Notes ----- The default relative step will be np.float64. 
However, if x0 or f0 are smaller floating point types (np.float16, np.float32), then the smallest floating point type is chosen.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_numdiff.py", + "ast_data": "FunctionDef name:_eps_for_method arg:x0_dtype arg:f0_dtype arg:method arguments arg arg arg Assign Call Assign If Call Assign Call Assign Call Assign If Call Assign Call If BoolOp Compare Assign Call If Compare Return return:yes If Compare Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "_restore_slot_variable", + "source_code": "def _restore_slot_variable(self, slot_name, variable, slot_variable):\n variable_key = _var_key(variable)\n deferred_restorations = self._deferred_slot_restorations.get(slot_name, {}).pop(variable_key, [])\n deferred_restorations.sort(key=lambda position: position.restore_uid, reverse=True)\n for checkpoint_position in deferred_restorations:\n checkpoint_position.restore(slot_variable)", + "docstring": "Restore a newly created slot variable's value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", + "ast_data": "FunctionDef name:_restore_slot_variable arg:self arg:slot_name arg:variable arg:slot_variable arguments arg arg arg arg Assign Call Assign Call Call Call arguments arg For Call" + }, + { + "library": "scikit-learn", + "name": "partial_fit", + "source_code": "@available_if(lambda est: est._check_solver)\n@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y, sample_weight=None):\n return self._fit(X, y, sample_weight=sample_weight, incremental=True)", + "docstring": "Update the model with a single iteration over the given data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. y : ndarray of shape (n_samples,) The target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. .. 
versionadded:: 1.6 Returns ------- self : object Trained MLP model.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py", + "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Return return:yes Call Call arguments arg Call" + }, + { + "library": "matplotlib", + "name": "__call__", + "source_code": "def __call__(self, path, mutation_size, linewidth, aspect_ratio=1.0):\n if aspect_ratio is not None:\n vertices = path.vertices / [1, aspect_ratio]\n path_shrunk = Path(vertices, path.codes)\n path_mutated, fillable = self.transmute(path_shrunk, mutation_size, linewidth)\n if np.iterable(fillable):\n path_list = [Path(p.vertices * [1, aspect_ratio], p.codes) for p in path_mutated]\n return (path_list, fillable)\n else:\n return (path_mutated, fillable)\n else:\n return self.transmute(path, mutation_size, linewidth)", + "docstring": "The __call__ method is a thin wrapper around the transmute method and takes care of the aspect ratio.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:path arg:mutation_size arg:linewidth arg:aspect_ratio arguments arg arg arg arg arg If Compare Assign Assign Call Assign Call If Call Assign Call Return return:yes Return return:yes Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X):\n if isinstance(X, str):\n raise ValueError('Iterable over raw text documents expected, string object received.')\n self._validate_ngram_range()\n analyzer = self.build_analyzer()\n X = self._get_hasher().transform((analyzer(doc) for doc in X))\n if self.binary:\n X.data.fill(1)\n if self.norm is not None:\n X = normalize(X, norm=self.norm, copy=False)\n return X", + "docstring": "Transform a sequence of documents to a document-term matrix. Parameters ---------- X : iterable over raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. Returns ------- X : sparse matrix of shape (n_samples, n_features) Document-term matrix.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg If Call Raise Call Call Assign Call Assign Call Call Call If Call If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "OpResolverType", + "source_code": "@_tf_export('lite.experimental.OpResolverType')\n@enum.unique\nclass OpResolverType(enum.Enum):\n AUTO = 0\n BUILTIN = 1\n BUILTIN_REF = 2\n BUILTIN_WITHOUT_DEFAULT_DELEGATES = 3", + "docstring": "Different types of op resolvers for Tensorflow Lite. * : Indicates the op resolver that is chosen by default in TfLite Python, which is the \"BUILTIN\" as described below. * : Indicates the op resolver for built-in ops with optimized kernel implementation. * : Indicates the op resolver for built-in ops with reference kernel implementation. It's generally used for testing and debugging. * : Indicates the op resolver for built-in ops with optimized kernel implementation, but it will disable the application of default TfLite delegates (like the XNNPACK delegate) to the model graph. 
Generally this should not be used unless there are issues with the default configuration.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py", + "ast_data": "ClassDef name:OpResolverType Assign Assign Assign Assign Call" + }, + { + "library": "scrapy", + "name": "join", + "source_code": "async def join(self) -> None:\n while self._active:\n await asyncio.gather(*self._active)", + "docstring": "Completes when all managed :attr: have completed their executions.", + "type": "method", + "file_path": "scrapy\\scrapy\\crawler.py", + "ast_data": "AsyncFunctionDef name:join arg:self arguments arg While Call" + }, + { + "library": "pytorch", + "name": "maybe_disable_inference_mode", + "source_code": "@contextmanager\ndef maybe_disable_inference_mode() -> Generator[None, None, None]:\n is_inference_mode_on = config.fake_tensor_disable_inference_mode and torch.is_inference_mode_enabled()\n if is_inference_mode_on:\n with torch.inference_mode(False), torch.no_grad():\n yield\n else:\n yield", + "docstring": "Disables torch.inference_mode for the compilation (still on at runtime). This simplifies the compile stack where we can assume that inference_mode will always be off. Since inference_mode is equivalent to no_grad + some optimizations (version counts etc), we turn on no_grad here. The other optimizations are not relevant to torch.compile.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:maybe_disable_inference_mode arguments Assign BoolOp Call If With Call Call" + }, + { + "library": "tensorflow", + "name": "_get_original_model_type", + "source_code": "def _get_original_model_type(self):\n model_type = TFLiteConverterBase._original_model_type\n TFLiteConverterBase._original_model_type = conversion_metadata_fb.ModelType.NONE\n return model_type", + "docstring": "One-time getter to return original model type and set it to NONE.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:_get_original_model_type arg:self arguments arg Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "build_chunks", + "source_code": "def build_chunks(self) -> int:\n if _ABOVE_MAX_SIZE(self.proto_size):\n constant_bytes = chunk_constant_value(self._proto, self.proto_size)\n self.add_chunk(constant_bytes, ['attr', 'value', 'tensor', 'tensor_content'])\n return len(constant_bytes)\n return 0", + "docstring": "Splits a NodeDef proto, and returns the size of the chunks created.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split_graph_def.py", + "ast_data": "FunctionDef name:build_chunks arg:self arguments arg If Call Assign Call Call Return return:yes Call Return return:yes" + }, + { + "library": "sphinx", + "name": "is_serializable", + "source_code": "def is_serializable(obj: object, *, _seen: frozenset[int]=frozenset()) -> bool:\n if isinstance(obj, UNSERIALIZABLE_TYPES):\n return False\n if id(obj) in _seen:\n return True\n if isinstance(obj, dict):\n seen = _seen | {id(obj)}\n return all((is_serializable(key, _seen=seen) and is_serializable(value, _seen=seen) for key, value in obj.items()))\n elif isinstance(obj, list | tuple | set | frozenset):\n seen = _seen | {id(obj)}\n return all((is_serializable(item, _seen=seen) for item in obj))\n return True", + "docstring": "Check if an object is serializable or not.", + "type": "function", + "file_path": "sphinx\\sphinx\\config.py", + "ast_data": 
"FunctionDef name:is_serializable arg:obj arguments arg arg Call If Call Return return:yes If Compare Call Return return:yes If Call Assign Call Return return:yes Call BoolOp Call Call Call If Call Assign Call Return return:yes Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_finish_prob_for_one_fiber", + "source_code": "def _finish_prob_for_one_fiber(self, y, x, ildj, event_ndims):\n x = self._maybe_rotate_dims(x, rotate_right=True)\n prob = self.distribution.prob(x)\n if self._is_maybe_event_override:\n prob = math_ops.reduce_prod(prob, self._reduce_event_indices)\n prob *= math_ops.exp(math_ops.cast(ildj, prob.dtype))\n if self._is_maybe_event_override and isinstance(event_ndims, int):\n prob.set_shape(array_ops.broadcast_static_shape(y.get_shape().with_rank_at_least(1)[:-event_ndims], self.batch_shape))\n return prob", + "docstring": "Finish computation of prob on one element of the inverse image.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py", + "ast_data": "FunctionDef name:_finish_prob_for_one_fiber arg:self arg:y arg:x arg:ildj arg:event_ndims arguments arg arg arg arg arg Assign Call Assign Call If Assign Call Call Call If BoolOp Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "forward", + "source_code": "def forward(self, inference_args=None, input_tangents=None):\n del inference_args\n if input_tangents:\n raise errors.InternalError('unexpectedly got forwardprop information in a class that does not support forwardprop.')\n return self._inference_function", + "docstring": "A forward function with only user-specified outputs. The call operation for the returned inference function can be rewritten into a forward function. This only happens if the backward function (from the method) ends up being used to compute gradients. This approach avoids constructing unnecessary graphs, but it only works if we are calling this function when not executing eagerly. Args: inference_args: A flat list of Tensors, arguments to the inference function. Unused, but taken for compatibility with _TapeGradientFunctions. input_tangents: A flat list of Tensors, jvps associated with . Unused; if required, tape functions must be used instead. Returns: An atomic_function.AtomicFunction.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "FunctionDef name:forward arg:self arg:inference_args arg:input_tangents arguments arg arg arg If Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_run_static_range_qat", + "source_code": "def _run_static_range_qat(src_saved_model_path: str, dst_saved_model_path: str, quant_opts: _QuantizationOptions, signature_def_map: _SignatureDefMap) -> None:\n logging.info('Running static-range quantization for QAT model.')\n pywrap_quantize_model.quantize_qat_model(src_saved_model_path, dst_saved_model_path, quantization_options_serialized=quant_opts.SerializeToString(), signature_keys=list(quant_opts.signature_keys), signature_def_map_serialized=_serialize_signature_def_map(signature_def_map), py_function_library=py_function_lib.PyFunctionLibrary())", + "docstring": "Runs static-range quantization for a Quantization-Aware Trained model. Runs the quantization for a model trained using QAT. Args: src_saved_model_path: Path to the source SavedModel directory. dst_saved_model_path: Path to the destination SavedModel directory. 
quant_opts: Quantization options. signature_def_map: Signature def key -> SignatureDef mapping.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\quantize_model.py", + "ast_data": "FunctionDef name:_run_static_range_qat arg:src_saved_model_path arg:dst_saved_model_path arg:quant_opts arg:signature_def_map arguments arg arg arg arg Call Call Call Call Call Call" + }, + { + "library": "pandas", + "name": "sort_values", + "source_code": "def sort_values(self, *, return_indexer: bool=False, ascending: bool=True, na_position: NaPosition='last', key: Callable | None=None) -> Self | tuple[Self, np.ndarray]:\n if key is None and (ascending and self.is_monotonic_increasing or (not ascending and self.is_monotonic_decreasing)):\n if return_indexer:\n indexer = np.arange(len(self), dtype=np.intp)\n return (self.copy(), indexer)\n else:\n return self.copy()\n if not isinstance(self, ABCMultiIndex):\n _as = nargsort(items=self, ascending=ascending, na_position=na_position, key=key)\n else:\n idx = cast(Index, ensure_key_mapped(self, key))\n _as = idx.argsort(na_position=na_position)\n if not ascending:\n _as = _as[::-1]\n sorted_index = self.take(_as)\n if return_indexer:\n return (sorted_index, _as)\n else:\n return sorted_index", + "docstring": "Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the argument in the builtin :meth: function, with the notable difference that this function should be *vectorized*. It should expect an `idx` was sorted by. 
>>> idx.sort_values(ascending=False, return_indexer=True) (Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:sort_values arg:self arguments arg arg arg arg arg If BoolOp Compare BoolOp BoolOp BoolOp If Assign Call Call Return return:yes Call Return return:yes Call If Call Assign Call Assign Call Call Assign Call If Assign Assign Call If Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "StarPolygonCollection", + "source_code": "class StarPolygonCollection(RegularPolyCollection):\n _path_generator = mpath.Path.unit_regular_star", + "docstring": "Draw a collection of regular stars with *numsides* points.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "ClassDef name:StarPolygonCollection Assign" + }, + { + "library": "cherrypy", + "name": "AntiStampedeCache", + "source_code": "class AntiStampedeCache(dict):\n\n def wait(self, key, timeout=5, debug=False):\n value = self.get(key)\n if isinstance(value, threading.Event):\n if timeout is None:\n if debug:\n cherrypy.log('No timeout', 'TOOLS.CACHING')\n return None\n if debug:\n cherrypy.log('Waiting up to %s seconds' % timeout, 'TOOLS.CACHING')\n value.wait(timeout)\n if value.result is not None:\n if debug:\n cherrypy.log('Result!', 'TOOLS.CACHING')\n return value.result\n if debug:\n cherrypy.log('Timed out', 'TOOLS.CACHING')\n e = threading.Event()\n e.result = None\n dict.__setitem__(self, key, e)\n return None\n elif value is None:\n if debug:\n cherrypy.log('Timed out', 'TOOLS.CACHING')\n e = threading.Event()\n e.result = None\n dict.__setitem__(self, key, e)\n return value\n\n def __setitem__(self, key, value):\n existing = self.get(key)\n dict.__setitem__(self, key, value)\n if isinstance(existing, threading.Event):\n existing.result = value\n existing.set()", + "docstring": "A storage system for cached items which reduces stampede collisions.", + "type": "class", + "file_path": "cherrypy\\cherrypy\\lib\\caching.py", + "ast_data": "ClassDef name:AntiStampedeCache FunctionDef name:wait arg:self arg:key arg:timeout arg:debug arguments arg arg arg arg Assign Call If Call If Compare If Call Return return:no If Call Call If Compare If Call Return return:yes If Call Assign Call Assign Call Return return:no If Compare If Call Assign Call Assign Call Return return:yes FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Assign Call Call If Call Assign Call" + }, + { + "library": "kornia", + "name": "aepe", + "source_code": "def aepe(input: torch.Tensor, target: torch.Tensor, reduction: str='mean') -> torch.Tensor:\n KORNIA_CHECK_IS_TENSOR(input)\n KORNIA_CHECK_IS_TENSOR(target)\n KORNIA_CHECK_SHAPE(input, ['*', '2'])\n KORNIA_CHECK_SHAPE(target, ['*', '2'])\n KORNIA_CHECK(input.shape == target.shape, f'input and target shapes must be the same. Got: {input.shape} and {target.shape}')\n epe: Tensor = ((input[..., 0] - target[..., 0]) ** 2 + (input[..., 1] - target[..., 1]) ** 2).sqrt()\n if reduction == 'mean':\n epe = epe.mean()\n elif reduction == 'sum':\n epe = epe.sum()\n elif reduction == 'none':\n pass\n else:\n raise NotImplementedError('Invalid reduction option.')\n return epe", + "docstring": "Create a function that calculates the average endpoint error (AEPE) between 2 flow maps. AEPE is the endpoint error between two 2D vectors (e.g., optical flow). Given a h x w x 2 optical flow map, the AEPE is: .. 
math:: \\text{AEPE}=\\frac{1}{hw}\\sum_{i=1, j=1}^{h, w}\\sqrt{(I_{i,j,1}-T_{i,j,1})^{2}+(I_{i,j,2}-T_{i,j,2})^{2}} Args: input: the input flow map with shape :math:. target: the target flow map with shape :math:. reduction : Specifies the reduction to apply to the output: ``: the output will be summed. Return: the computed AEPE as a scalar. Examples: >>> ones = torch.ones(4, 4, 2) >>> aepe(ones, 1.2 * ones) tensor(0.2828) Reference:", + "type": "function", + "file_path": "kornia\\kornia\\metrics\\endpoint_error.py", + "ast_data": "FunctionDef name:aepe arg:input arg:target arg:reduction arguments arg arg arg Call Call Call Call Call Compare Call If Compare Assign Call If Compare Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "pandas", + "name": "_ensure_dtype_type", + "source_code": "def _ensure_dtype_type(value, dtype: np.dtype):\n if dtype == _dtype_obj:\n return value\n return dtype.type(value)", + "docstring": "Ensure that the given value is an instance of the given dtype. e.g. if out dtype is np.complex64_, we should have an instance of that as opposed to a python complex object. Parameters ---------- value : object dtype : np.dtype Returns ------- object", + "type": "function", + "file_path": "pandas\\pandas\\core\\dtypes\\cast.py", + "ast_data": "FunctionDef name:_ensure_dtype_type arg:value arg:dtype arguments arg arg If Compare Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__getitem__", + "source_code": "def __getitem__(self, key):\n if key is None:\n key = self._key()\n value = self._get_recursive(key)\n if value is None:\n value = self[key] = self.default_factory()\n return value", + "docstring": "Gets the value at key (or current context), or sets default value. Args: key: May be or object. When , the key is set to the current context. 
Returns: Either the cached or default value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg If Compare Assign Call Assign Call If Compare Assign Call Return return:yes" + }, + { + "library": "sphinx", + "name": "_from_environment_default", + "source_code": "@classmethod\ndef _from_environment_default(cls, *, env: BuildEnvironment) -> Self:\n from sphinx.domains.c import CDomain\n from sphinx.domains.changeset import ChangeSetDomain\n from sphinx.domains.citation import CitationDomain\n from sphinx.domains.cpp import CPPDomain\n from sphinx.domains.index import IndexDomain\n from sphinx.domains.javascript import JavaScriptDomain\n from sphinx.domains.math import MathDomain\n from sphinx.domains.python import PythonDomain\n from sphinx.domains.rst import ReSTDomain\n from sphinx.domains.std import StandardDomain\n return cls(c=CDomain(env), changeset=ChangeSetDomain(env), citation=CitationDomain(env), cpp=CPPDomain(env), index=IndexDomain(env), js=JavaScriptDomain(env), math=MathDomain(env), py=PythonDomain(env), rst=ReSTDomain(env), std=StandardDomain(env))", + "docstring": "Return a default instance with every domain we require.", + "type": "method", + "file_path": "sphinx\\sphinx\\domains\\_domains_container.py", + "ast_data": "FunctionDef name:_from_environment_default arg:cls arguments arg arg Return return:yes Call Call Call Call Call Call Call Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "solve", + "source_code": "def solve(self, X, y, sample_weight):\n self.setup(X=X, y=y, sample_weight=sample_weight)\n self.iteration = 1\n self.converged = False\n self.use_fallback_lbfgs_solve = False\n while self.iteration <= self.max_iter and (not self.converged):\n if self.verbose:\n print(f'Newton iter={self.iteration}')\n self.use_fallback_lbfgs_solve = False\n self.update_gradient_hessian(X=X, y=y, sample_weight=sample_weight)\n self.inner_solve(X=X, y=y, sample_weight=sample_weight)\n if self.use_fallback_lbfgs_solve:\n break\n self.line_search(X=X, y=y, sample_weight=sample_weight)\n if self.use_fallback_lbfgs_solve:\n break\n self.check_convergence(X=X, y=y, sample_weight=sample_weight)\n self.iteration += 1\n if not self.converged:\n if self.use_fallback_lbfgs_solve:\n self.fallback_lbfgs_solve(X=X, y=y, sample_weight=sample_weight)\n else:\n warnings.warn(f'Newton solver did not converge after {self.iteration - 1} iterations.', ConvergenceWarning)\n self.iteration -= 1\n self.finalize(X=X, y=y, sample_weight=sample_weight)\n return self.coef", + "docstring": "Solve the optimization problem. This is the main routine. 
Order of calls: self.setup() while iteration: self.update_gradient_hessian() self.inner_solve() self.line_search() self.check_convergence() self.finalize() Returns ------- coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) Solution of the optimization problem.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\_newton_solver.py", + "ast_data": "FunctionDef name:solve arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Call Assign Assign Assign While BoolOp Compare If Call Assign Call Call If Call If Call If If Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "can_fuse_horizontal", + "source_code": "@staticmethod\ndef can_fuse_horizontal(scheduler: Scheduler, node1: BaseSchedulerNode, node2: BaseSchedulerNode, shared_data_score: int) -> bool:\n if shared_data_score < config.score_fusion_memory_threshold:\n WhyNoFuse(node1, node2)('score_fusion_memory_threshold')\n return False\n if scheduler.are_long_distant_nodes(node1, node2):\n WhyNoFuse(node1, node2)('Nodes are too far away. Fusing them may increase peak memory.')\n return False\n return True", + "docstring": "Hook for heuristics to prevent horizontal (consumer/consumer) fusions", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\choices.py", + "ast_data": "FunctionDef name:can_fuse_horizontal arg:scheduler arg:node1 arg:node2 arg:shared_data_score arguments arg arg arg arg If Compare Call Call Return return:yes If Call Call Call Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "getvalue", + "source_code": "def getvalue(self):\n if callable(getattr(self.stream, 'getvalue', None)):\n return self.stream.getvalue()", + "docstring": "Return the fully serialized queryset (or None if the output stream is not seekable).", + "type": "method", + "file_path": "django\\django\\core\\serializers\\base.py", + "ast_data": "FunctionDef name:getvalue arg:self arguments arg If Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "GraphConstructionError", + "source_code": "class GraphConstructionError(ConversionError):\n pass", + "docstring": "Error during ONNX graph construction.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_errors.py", + "ast_data": "ClassDef name:GraphConstructionError" + }, + { + "library": "pytorch", + "name": "QuantizationSpec", + "source_code": "@dataclass(eq=True, frozen=True)\nclass QuantizationSpec(QuantizationSpecBase):\n dtype: torch.dtype\n observer_or_fake_quant_ctr: _ObserverOrFakeQuantizeConstructor\n quant_min: Optional[int] = None\n quant_max: Optional[int] = None\n qscheme: Optional[torch.qscheme] = None\n ch_axis: Optional[int] = None\n is_dynamic: bool = False\n\n def __post_init__(self):\n if self.quant_min is not None and self.quant_max is not None and (self.quant_min > self.quant_max):\n raise ValueError(f'quant_min {self.quant_min} must be <= quant_max {self.quant_max}.')\n if self.ch_axis is not None and self.ch_axis < 0:\n raise ValueError('Ch_axis is < 0.')", + "docstring": "Quantization spec for common operators that allows user to specify how to quantize a Tensor, this includes dtype, quant_min, quant_max etc.", + "type": "class", + "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\quantizer.py", + "ast_data": "ClassDef name:QuantizationSpec FunctionDef name:__post_init__ arg:self arguments arg If BoolOp Compare Compare Compare Raise Call If BoolOp Compare Compare Raise Call Call" + }, + { + "library": 
"pandas", + "name": "_refine_percentiles", + "source_code": "def _refine_percentiles(percentiles: Sequence[float] | np.ndarray | None) -> npt.NDArray[np.float64]:\n if percentiles is None:\n return np.array([0.25, 0.5, 0.75])\n percentiles = list(percentiles)\n validate_percentile(percentiles)\n percentiles = np.asarray(percentiles)\n unique_pcts = np.unique(percentiles)\n assert percentiles is not None\n if len(unique_pcts) < len(percentiles):\n raise ValueError('percentiles cannot contain duplicates')\n return unique_pcts", + "docstring": "Ensure that percentiles are unique and sorted. Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output.", + "type": "function", + "file_path": "pandas\\pandas\\core\\methods\\describe.py", + "ast_data": "FunctionDef name:_refine_percentiles arg:percentiles arguments arg If Compare Return return:yes Call Assign Call Call Assign Call Assign Call Compare If Compare Call Call Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_logger_dict_helper", + "source_code": "def _get_logger_dict_helper(mod: nn.Module, target_dict: dict[str, Any], prefix: str='') -> None:\n\n def get_prefix(prefix):\n return prefix if prefix == '' else prefix + '.'\n for name, child in mod.named_children():\n if isinstance(child, Logger):\n target_dict[get_prefix(prefix) + 'stats'] = child.stats\n break\n for name, child in mod.named_children():\n module_prefix = get_prefix(prefix) + name if prefix else name\n _get_logger_dict_helper(child, target_dict, module_prefix)", + "docstring": "This is the helper function for get_logger_dict Args: mod: module we want to save all logger stats prefix: prefix for the current module target_dict: the dictionary used to save all logger stats", + "type": "function", + "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite.py", + "ast_data": "FunctionDef name:_get_logger_dict_helper arg:mod arg:target_dict arg:prefix arguments arg arg arg FunctionDef name:get_prefix arg:prefix arguments arg Return return:yes Compare For Call If Call Assign Call For Call Assign Call Call" + }, + { + "library": "pandas", + "name": "not_none", + "source_code": "def not_none(*args):\n return (arg for arg in args if arg is not None)", + "docstring": "Returns a generator consisting of the arguments that are not None.", + "type": "function", + "file_path": "pandas\\pandas\\core\\common.py", + "ast_data": "FunctionDef name:not_none arguments arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "@deprecation.deprecated(None, 'Use tf.keras.mixed_precision.LossScaleOptimizer instead. LossScaleOptimizer now has all the functionality of FixedLossScale')\ndef __init__(self, loss_scale_value):\n super(FixedLossScale, self).__init__()\n if not isinstance(loss_scale_value, (int, float)):\n raise ValueError('loss_scale_value must be a Python int or float.')\n if loss_scale_value < 1:\n raise ValueError('loss_scale_value must be at least 1.')\n self._loss_scale_value = float(loss_scale_value)", + "docstring": "Creates the fixed loss scale. Args: loss_scale_value: A Python float. Its ideal value varies depending on models to run. Choosing a too small loss_scale might affect model quality; a too big loss_scale might cause inf or nan. There is no single right loss_scale to apply. There is no harm choosing a relatively big number as long as no nan or inf is encountered in training. 
Raises: ValueError: If loss_scale_value is less than 1.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:loss_scale_value arguments arg arg Call Call If Call Raise Call If Compare Raise Call Assign Call Call" + }, + { + "library": "matplotlib", + "name": "set_color", + "source_code": "def set_color(self, color):\n axis = self._axis_map[self._orientation]\n axis.set_tick_params(colors=color)\n for spine in self.spines.values():\n if spine.axis is axis:\n spine.set_color(color)\n axis.label.set_color(color)", + "docstring": "Change the color of the secondary Axes and all decorators. Parameters ---------- color : :mpltype:", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_secondary_axes.py", + "ast_data": "FunctionDef name:set_color arg:self arg:color arguments arg arg Assign Call For Call If Compare Call Call" + }, + { + "library": "tensorflow", + "name": "step_fn", + "source_code": "def step_fn(ctx, inputs):\n gradients_fn = backprop.implicit_grad(self._loss_fn)\n gradients_fn = optimizer_lib.get_filtered_grad_fn(gradients_fn)\n grads_and_vars = self.distribution.extended.call_for_each_replica(gradients_fn, args=(ctx, inputs))\n return self._optimizer._distributed_apply(self.distribution, grads_and_vars)", + "docstring": "Function to run one iteration with one input.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\step_fn.py", + "ast_data": "FunctionDef name:step_fn arg:ctx arg:inputs arguments arg arg Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "EdgeDetector", + "source_code": "class EdgeDetector(Module):\n\n def __init__(self) -> None:\n super().__init__()\n self.model = DexiNed(pretrained=True)\n\n def load(self, path_file: str) -> None:\n self.model.load_from_file(path_file)\n\n def preprocess(self, image: Tensor) -> Tensor:\n return image\n\n def postprocess(self, data: Tensor) -> Tensor:\n return data\n\n def forward(self, image: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(image, ['B', '3', 'H', 'W'])\n img = self.preprocess(image)\n out = self.model(img)\n return self.postprocess(out)", + "docstring": "Detect edges in a given image using a CNN. By default, it uses the method described in :cite:. Return: A tensor of shape :math:. 
Example: >>> img = torch.rand(1, 3, 320, 320) >>> detect = EdgeDetector() >>> out = detect(img) >>> out.shape torch.Size([1, 1, 320, 320])", + "type": "class", + "file_path": "kornia\\kornia\\contrib\\edge_detection.py", + "ast_data": "ClassDef name:EdgeDetector FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call FunctionDef name:load arg:self arg:path_file arguments arg arg Call FunctionDef name:preprocess arg:self arg:image arguments arg arg Return return:yes FunctionDef name:postprocess arg:self arg:data arguments arg arg Return return:yes FunctionDef name:forward arg:self arg:image arguments arg arg Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_ops_from_ops_list", + "source_code": "def _get_ops_from_ops_list(input_file):\n ops = set()\n ops_list_str = gfile.GFile(input_file, 'r').read()\n if not ops_list_str:\n raise Exception('Input file should not be empty')\n ops_list = json.loads(ops_list_str)\n for op, kernel in ops_list:\n op_and_kernel = (op, kernel if kernel else None)\n ops.add(op_and_kernel)\n return ops", + "docstring": "Gets the ops and kernels needed from the ops list file.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\selective_registration_header_lib.py", + "ast_data": "FunctionDef name:_get_ops_from_ops_list arg:input_file arguments arg Assign Call Assign Call Call If Raise Call Assign Call For Assign Call Return return:yes" + }, + { + "library": "django", + "name": "alter_unique_together", + "source_code": "def alter_unique_together(self, model, old_unique_together, new_unique_together):\n olds = {tuple(fields) for fields in old_unique_together}\n news = {tuple(fields) for fields in new_unique_together}\n for fields in olds.difference(news):\n self._delete_composed_index(model, fields, {'unique': True, 'primary_key': False}, self.sql_delete_unique)\n for field_names in news.difference(olds):\n fields = [model._meta.get_field(field) for field in field_names]\n self.execute(self._create_unique_sql(model, fields))", + "docstring": "Deal with a model changing its unique_together. The input unique_togethers must be doubly-nested, not the single-nested [\"foo\", \"bar\"] format.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\schema.py", + "ast_data": "FunctionDef name:alter_unique_together arg:self arg:model arg:old_unique_together arg:new_unique_together arguments arg arg arg arg Assign Call Assign Call For Call Call For Call Assign Call Call Call" + }, + { + "library": "scrapy", + "name": "flatten", + "source_code": "def flatten(x: Iterable[Any]) -> list[Any]:\n warnings.warn('The flatten function is deprecated and will be removed in a future version of Scrapy.', category=ScrapyDeprecationWarning, stacklevel=2)\n return list(iflatten(x))", + "docstring": "flatten(sequence) -> list Returns a single, flat list which contains all elements retrieved from the sequence and all recursively contained sub-sequences (iterables). 
Examples: >>> [1, 2, [3,4], (5,6)] [1, 2, [3, 4], (5, 6)] >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)]) [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10] >>> flatten([\"foo\", \"bar\"]) ['foo', 'bar'] >>> flatten([\"foo\", [\"baz\", 42], \"bar\"]) ['foo', 'baz', 42, 'bar']", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\python.py", + "ast_data": "FunctionDef name:flatten arg:x arguments arg Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "get_inverted", + "source_code": "def get_inverted(self):\n low, high = self.get_view_interval()\n return high < low", + "docstring": "Return whether this Axis is oriented in the \"inverse\" direction. The \"normal\" direction is increasing to the right for the x-axis and to the top for the y-axis; the \"inverse\" direction is increasing to the left for the x-axis and to the bottom for the y-axis.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_inverted arg:self arguments arg Assign Call Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "parse_branch_ref", + "source_code": "def parse_branch_ref(filename):\n data = open(filename).read().strip()\n items = data.split(' ')\n if len(items) == 1:\n return None\n elif len(items) == 2 and items[0] == 'ref:':\n return items[1].strip()\n else:\n raise RuntimeError('Git directory has unparseable HEAD')", + "docstring": "Given a filename of a .git/HEAD file return ref path. In particular, if git is in detached head state, this will return None. If git is in attached head, it will return the branch reference. E.g. if on 'master', the HEAD will contain 'ref: refs/heads/master' so 'refs/heads/master' will be returned. Example: parse_branch_ref(\".git/HEAD\") Args: filename: file to treat as a git HEAD file Returns: None if detached head, otherwise ref subpath Raises: RuntimeError: if the HEAD file is unparseable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\git\\gen_git_source.py", + "ast_data": "FunctionDef name:parse_branch_ref arg:filename arguments arg Assign Call Call Call Assign Call If Compare Call Return return:no If BoolOp Compare Call Compare Return return:yes Call Raise Call" + }, + { + "library": "pytorch", + "name": "_extract_leaf_events", + "source_code": "@staticmethod\ndef _extract_leaf_events(op_tree: OpTree) -> tuple[_ProfilerEvent, ...]:\n leaf_events: list[_ProfilerEvent] = []\n\n def leaf_op(e: _ProfilerEvent) -> bool:\n return e.typed[0] == _EventType.TorchOp and (e.typed[1].scope == RecordScope.BACKWARD_FUNCTION or bool(SchemaMatcher.match_schemas(e.typed[1])))\n\n def children_fn(e: _ProfilerEvent):\n if leaf_op(e) or e.tag == _EventType.Allocation:\n leaf_events.append(e)\n return []\n return e.children\n for _ in op_tree.dfs(children_fn=children_fn):\n pass\n return tuple(sorted(leaf_events, key=lambda x: x.start_time_ns))", + "docstring": "Partially traverse the op tree and extract top level ops. Consider the following code: The op tree (assuming no Autograd) will look like: TorchOp: \"My annotation\" TorchOp: zero_ TorchOp: fill_ TorchOp: zero_ TorchOp: fill_ The recursive structure of operator calls makes data flow unwieldy. In order to simplify analysis we would like to select the highest level ops to represent in the graph. In this case those are the ops; the fact that is called is an implementation detail. 
We also do not want to group everything under \"My annotation\" as this could create overly coarse bundles and lose critical semantics. To address this issue we walk over the graph and select the topmost torch ops ** which match at least one operator schema **. These form the leaves of the first pass through the op tree. (As well as any allocations or frees which do are not part of a kernel.) These events form the logical nodes in our data flow graph.", + "type": "method", + "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py", + "ast_data": "FunctionDef name:_extract_leaf_events arg:op_tree arguments arg FunctionDef name:leaf_op arg:e arguments arg Return return:yes BoolOp Compare BoolOp Compare Call Call FunctionDef name:children_fn arg:e arguments arg If BoolOp Call Compare Call Return return:no Return return:yes For Call Return return:yes Call Call arguments arg" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y):\n return self._fit(X, y)", + "docstring": "Fit the k-nearest neighbors classifier from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed' Training data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs) Target values. Returns ------- self : KNeighborsClassifier The fitted k-nearest neighbors classifier.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neighbors\\_classification.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "RootResults", + "source_code": "class RootResults(OptimizeResult):\n\n def __init__(self, root, iterations, function_calls, flag, method):\n self.root = root\n self.iterations = iterations\n self.function_calls = function_calls\n self.converged = flag == _ECONVERGED\n if flag in flag_map:\n self.flag = flag_map[flag]\n else:\n self.flag = flag\n self.method = method", + "docstring": "Represents the root finding result. Attributes ---------- root : float Estimated root location. iterations : int Number of iterations needed to find the root. function_calls : int Number of times the function was called. converged : bool True if the routine converged. flag : str Description of the cause of termination. method : str Root finding method used.", + "type": "class", + "file_path": "scipy\\scipy\\optimize\\_zeros_py.py", + "ast_data": "ClassDef name:RootResults FunctionDef name:__init__ arg:self arg:root arg:iterations arg:function_calls arg:flag arg:method arguments arg arg arg arg arg arg Assign Assign Assign Assign Compare If Compare Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "_activation_is_memoryless", + "source_code": "def _activation_is_memoryless(qconfig: QConfig):\n\n def _is_memoryless(observer):\n return hasattr(observer, 'averaging_constant') and observer.averaging_constant == 1\n act = qconfig.activation()\n if isinstance(act, FakeQuantizeBase) and hasattr(act, 'activation_post_process'):\n return _is_memoryless(act.activation_post_process)\n else:\n return _is_memoryless(act)", + "docstring": "Return whether the observer for activations defined in the given QConfig is memoryless. 
This means a MovingAverage observer with averaging constant equal to 1.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\qconfig.py", + "ast_data": "FunctionDef name:_activation_is_memoryless arg:qconfig arguments arg FunctionDef name:_is_memoryless arg:observer arguments arg Return return:yes BoolOp Call Compare Assign Call If BoolOp Call Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "__call__", + "source_code": "def __call__(self, declarations: str | frozenset[tuple[str, str]]) -> dict[str, dict[str, str]]:\n return self._call_cached(declarations)", + "docstring": "Convert CSS declarations to ExcelWriter style. Parameters ---------- declarations : str | frozenset[tuple[str, str]] CSS string or set of CSS declaration tuples. e.g. \"font-weight: bold; background: blue\" or {(\"font-weight\", \"bold\"), (\"background\", \"blue\")} Returns ------- xlstyle : dict A style as interpreted by ExcelWriter when found in ExcelCell.style.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\excel.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:declarations arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "Context", + "source_code": "class Context(BaseContext):\n\n def __init__(self, dict_=None, autoescape=True, use_l10n=None, use_tz=None):\n self.autoescape = autoescape\n self.use_l10n = use_l10n\n self.use_tz = use_tz\n self.template_name = 'unknown'\n self.render_context = RenderContext()\n self.template = None\n super().__init__(dict_)\n\n @contextmanager\n def bind_template(self, template):\n if self.template is not None:\n raise RuntimeError('Context is already bound to a template')\n self.template = template\n try:\n yield\n finally:\n self.template = None\n\n def __copy__(self):\n duplicate = super().__copy__()\n duplicate.render_context = copy(self.render_context)\n return duplicate\n\n def update(self, other_dict):\n if not hasattr(other_dict, '__getitem__'):\n raise TypeError('other_dict must be a mapping (dictionary-like) object.')\n if isinstance(other_dict, BaseContext):\n other_dict = other_dict.dicts[1:].pop()\n return ContextDict(self, other_dict)", + "docstring": "A stack container for variable context", + "type": "class", + "file_path": "django\\django\\template\\context.py", + "ast_data": "ClassDef name:Context FunctionDef name:__init__ arg:self arg:dict_ arg:autoescape arg:use_l10n arg:use_tz arguments arg arg arg arg arg Assign Assign Assign Assign Assign Call Assign Call Call FunctionDef name:bind_template arg:self arg:template arguments arg arg If Compare Raise Call Assign Try Assign FunctionDef name:__copy__ arg:self arguments arg Assign Call Call Assign Call Return return:yes FunctionDef name:update arg:self arg:other_dict arguments arg arg If Call Raise Call If Call Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "start_event_loop", + "source_code": "def start_event_loop(self, timeout=0):\n if timeout <= 0:\n timeout = np.inf\n timestep = 0.01\n counter = 0\n self._looping = True\n while self._looping and counter * timestep < timeout:\n self.flush_events()\n time.sleep(timestep)\n counter += 1", + "docstring": "Start a blocking event loop. Such an event loop is used by interactive functions, such as and , to wait for events. The event loop blocks until a callback function triggers , or *timeout* is reached. If *timeout* is 0 or negative, never timeout. 
Only interactive backends need to reimplement this method and it relies on being properly implemented. Interactive backends should implement this in a more native way.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:start_event_loop arg:self arg:timeout arguments arg arg If Compare Assign Assign Assign Assign While BoolOp Compare Call Call" + }, + { + "library": "tensorflow", + "name": "_log_ndtr_asymptotic_series", + "source_code": "def _log_ndtr_asymptotic_series(x, series_order):\n dtype = x.dtype.as_numpy_dtype\n if series_order <= 0:\n return np.array(1, dtype)\n x_2 = math_ops.square(x)\n even_sum = array_ops.zeros_like(x)\n odd_sum = array_ops.zeros_like(x)\n x_2n = x_2\n for n in range(1, series_order + 1):\n y = np.array(_double_factorial(2 * n - 1), dtype) / x_2n\n if n % 2:\n odd_sum += y\n else:\n even_sum += y\n x_2n *= x_2\n return 1.0 + even_sum - odd_sum", + "docstring": "Calculates the asymptotic series used in log_ndtr.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\special_math.py", + "ast_data": "FunctionDef name:_log_ndtr_asymptotic_series arg:x arg:series_order arguments arg arg Assign If Compare Return return:yes Call Assign Call Assign Call Assign Call Assign For Call Assign Call Call If Return return:yes" + }, + { + "library": "pandas", + "name": "iloc", + "source_code": "@property\ndef iloc(self) -> _iLocIndexer:\n return _iLocIndexer('iloc', self)", + "docstring": "Purely integer-location based indexing for selection by position. .. versionchanged:: 3.0 Callables which return a tuple are deprecated as input. `Selection by Position slicexslice` objects. >>> df.iloc[1:3, 0:3] a b c 1 100 200 300 2 1000 2000 3000 With a boolean array whose length matches the columns. >>> df.iloc[:, [True, False, True, False]] a c 0 1 3 1 100 300 2 1000 3000 With a callable function that expects the Series or DataFrame. >>> df.iloc[:, lambda df: [0, 2]] a c 0 1 3 1 100 300 2 1000 3000", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:iloc arg:self arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "Translate", + "source_code": "class Translate(Module):\n\n def __init__(self, translation: Tensor, mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=True) -> None:\n super().__init__()\n self.translation: Tensor = translation\n self.mode: str = mode\n self.padding_mode: str = padding_mode\n self.align_corners: bool = align_corners\n\n def forward(self, input: Tensor) -> Tensor:\n return translate(input, self.translation, self.mode, self.padding_mode, self.align_corners)", + "docstring": "Translate the tensor in pixel units. Args: translation: tensor containing the amount of pixels to translate in the x and y direction. The tensor must have a shape of (B, 2), where B is batch size, last dimension contains dx dy. mode: interpolation mode to calculate output values ``. align_corners: interpolation flag. Returns: The translated tensor with the same shape as the input. 
Example: >>> img = torch.rand(1, 3, 4, 4) >>> translation = torch.tensor([[1., 0.]]) >>> out = Translate(translation)(img) >>> print(out.shape) torch.Size([1, 3, 4, 4])", + "type": "class", + "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py", + "ast_data": "ClassDef name:Translate FunctionDef name:__init__ arg:self arg:translation arg:mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_get_xy_display", + "source_code": "def _get_xy_display(self):\n x, y = self.get_unitless_position()\n return self.get_transform().transform((x, y))", + "docstring": "Get the (possibly unit converted) transformed x, y in display coords.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:_get_xy_display arg:self arguments arg Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "DeVilliersGlasser01", + "source_code": "class DeVilliersGlasser01(Benchmark):\n\n def __init__(self, dimensions=4):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([1.0] * self.N, [100.0] * self.N))\n self.global_optimum = [[60.137, 1.371, 3.112, 1.761]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n t = 0.1 * arange(24)\n y = 60.137 * 1.371 ** t * sin(3.112 * t + 1.761)\n return sum((x[0] * x[1] ** t * sin(x[2] * t + x[3]) - y) ** 2.0)", + "docstring": "DeVilliers-Glasser 1 objective function. This class defines the DeVilliers-Glasser 1 [1]_ function global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{DeVilliersGlasser01}}(x) = \\sum_{i=1}^{24} \\left[ x_1x_2^{t_i} \\sin(x_3t_i + x_4) - y_i \\right ]^2 Where, in this exercise, :math: and :math:. Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math:. .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_D.py", + "ast_data": "ClassDef name:DeVilliersGlasser01 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_log_wishart_norm", + "source_code": "def _log_wishart_norm(degrees_of_freedom, log_det_precisions_chol, n_features):\n return -(degrees_of_freedom * log_det_precisions_chol + degrees_of_freedom * n_features * 0.5 * math.log(2.0) + np.sum(gammaln(0.5 * (degrees_of_freedom - np.arange(n_features)[:, np.newaxis])), 0))", + "docstring": "Compute the log of the Wishart distribution normalization term. Parameters ---------- degrees_of_freedom : array-like of shape (n_components,) The number of degrees of freedom on the covariance Wishart distributions. log_det_precision_chol : array-like of shape (n_components,) The determinant of the precision matrix for each component. n_features : int The number of features. 
Return ------ log_wishart_norm : array-like of shape (n_components,) The log normalization of the Wishart distribution.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py", + "ast_data": "FunctionDef name:_log_wishart_norm arg:degrees_of_freedom arg:log_det_precisions_chol arg:n_features arguments arg arg arg Return return:yes Call Call Call Call" + }, + { + "library": "virtualenv", + "name": "creator", + "source_code": "@property\ndef creator(self):\n return self._creator", + "docstring": "The creator used to build the virtual environment (must be compatible with the interpreter).", + "type": "method", + "file_path": "virtualenv\\src\\virtualenv\\run\\session.py", + "ast_data": "FunctionDef name:creator arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "remove_undocumented", + "source_code": "def remove_undocumented(module_name, allowed_exception_list=None, doc_string_modules=None):\n current_symbols = set(dir(_sys.modules[module_name]))\n should_have = make_all(module_name, doc_string_modules)\n should_have += allowed_exception_list or []\n extra_symbols = current_symbols - set(should_have)\n target_module = _sys.modules[module_name]\n for extra_symbol in extra_symbols:\n if extra_symbol.startswith('_'):\n continue\n fully_qualified_name = module_name + '.' + extra_symbol\n _HIDDEN_ATTRIBUTES[fully_qualified_name] = (target_module, getattr(target_module, extra_symbol))\n delattr(target_module, extra_symbol)", + "docstring": "Removes symbols in a module that are not referenced by a docstring. Args: module_name: the name of the module (usually ). allowed_exception_list: a list of names that should not be removed. doc_string_modules: a list of modules from which to take the docstrings. If None, then a list containing only the module named is used. Furthermore, if a symbol previously added with , then it will always be allowed. This is useful for internal tests. 
Returns: None", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\all_util.py", + "ast_data": "FunctionDef name:remove_undocumented arg:module_name arg:allowed_exception_list arg:doc_string_modules arguments arg arg arg Assign Call Call Assign Call BoolOp Assign Call Assign For If Call Assign Assign Call Call" + }, + { + "library": "tensorflow", + "name": "ControlStatusCtx", + "source_code": "class ControlStatusCtx(object):\n\n def __init__(self, status, options=None):\n self.status = status\n self.options = options\n\n def __enter__(self):\n _control_ctx().append(self)\n return self\n\n def __repr__(self):\n return '{}[status={}, options={}]'.format(self.__class__.__name__, self.status, self.options)\n\n def __exit__(self, unused_type, unused_value, unused_traceback):\n assert _control_ctx()[-1] is self\n _control_ctx().pop()", + "docstring": "A context that tracks whether autograph is enabled by the user.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\ag_ctx.py", + "ast_data": "ClassDef name:ControlStatusCtx FunctionDef name:__init__ arg:self arg:status arg:options arguments arg arg arg Assign Assign FunctionDef name:__enter__ arg:self arguments arg Call Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:__exit__ arg:self arg:unused_type arg:unused_value arg:unused_traceback arguments arg arg arg arg Compare Call Call Call" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X):\n check_is_fitted(self)\n X = validate_data(self, X, accept_sparse=('csr', 'csc'), dtype=[np.float64, np.float32], reset=False, ensure_non_negative=True)\n with config_context(assume_finite=True):\n W, *_ = self._fit_transform(X, H=self.components_, update_H=False)\n return W", + "docstring": "Transform the data X according to the fitted NMF model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. Returns ------- W : ndarray of shape (n_samples, n_components) Transformed data.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call With Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "Schwefel36", + "source_code": "class Schwefel36(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([0.0] * self.N, [500.0] * self.N))\n self.custom_bounds = ([0.0, 20.0], [0.0, 20.0])\n self.global_optimum = [[12.0, 12.0]]\n self.fglob = -3456.0\n\n def fun(self, x, *args):\n self.nfev += 1\n return -x[0] * x[1] * (72 - 2 * x[0] - 2 * x[1])", + "docstring": "Schwefel 36 objective function. This class defines the Schwefel 36 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Schwefel36}}(x) = -x_1x_2(72 - 2x_1 - 2x_2) with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", + "ast_data": "ClassDef name:Schwefel36 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes" + }, + { + "library": "django", + "name": "tuple", + "source_code": "@tuple.setter\ndef tuple(self, tup):\n self._cs[0] = tup", + "docstring": "Set the coordinates of the point with the given tuple.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\point.py", + "ast_data": "FunctionDef name:tuple arg:self arg:tup arguments arg arg Assign" + }, + { + "library": "pytorch", + "name": "to_dict", + "source_code": "def to_dict(self) -> dict[str, Any]:\n d: dict[str, Any] = {}\n if len(self.preserved_attributes) > 0:\n d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes\n return d", + "docstring": "Convert this `~torch.ao.quantization.fx.custom_config.ConvertCustomConfig.from_dict`.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py", + "ast_data": "FunctionDef name:to_dict arg:self arguments arg If Compare Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_snake_case", + "source_code": "def _snake_case(s: str) -> str:\n return _snake_case_sub(s).lower()", + "docstring": "Transforms the given string ``", + "type": "function", + "file_path": "pytorch\\torch\\fx\\graph.py", + "ast_data": "FunctionDef name:_snake_case arg:s arguments arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "url", + "source_code": "@register.tag\ndef url(parser, token):\n bits = token.split_contents()\n if len(bits) < 2:\n raise TemplateSyntaxError(\"'%s' takes at least one argument, a URL pattern name.\" % bits[0])\n viewname = parser.compile_filter(bits[1])\n args = []\n kwargs = {}\n asvar = None\n bits = bits[2:]\n if len(bits) >= 2 and bits[-2] == 'as':\n asvar = bits[-1]\n bits = bits[:-2]\n for bit in bits:\n match = kwarg_re.match(bit)\n if not match:\n raise TemplateSyntaxError('Malformed arguments to url tag')\n name, value = match.groups()\n if name:\n kwargs[name] = parser.compile_filter(value)\n else:\n args.append(parser.compile_filter(value))\n return URLNode(viewname, args, kwargs, asvar)", + "docstring": "Return an absolute URL matching the given view with its parameters. This is a way to define links that aren't tied to a particular URL configuration:: {% url \"url_name\" arg1 arg2 %} or {% url \"url_name\" name1=value1 name2=value2 %} The first argument is a URL pattern name. Other arguments are space-separated values that will be filled in place of positional and keyword arguments in the URL. Don't mix positional and keyword arguments. All arguments for the URL must be present. For example, if you have a view ``. 
The first argument may also be the name of a template variable that will be evaluated to obtain the view name or the URL name, e.g.:: {% with url_name=\"client-detail-view\" %} {% url url_name client.id %} {% endwith %}", + "type": "function", + "file_path": "django\\django\\template\\defaulttags.py", + "ast_data": "FunctionDef name:url arg:parser arg:token arguments arg arg Assign Call If Compare Call Raise Call Assign Call Assign Assign Assign Assign If BoolOp Compare Call Compare Assign Assign For Assign Call If Raise Call Assign Call If Assign Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "experimental_local_results", + "source_code": "def experimental_local_results(self, value):\n return super(CentralStorageStrategy, self).experimental_local_results(value)", + "docstring": "Returns the list of all local per-replica values contained in . In there is a single worker so the value returned will be all the values on that worker. Args: value: A value returned by , , or a variable created in . Returns: A tuple of values contained in . If represents a single value, this returns", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\central_storage_strategy.py", + "ast_data": "FunctionDef name:experimental_local_results arg:self arg:value arguments arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "set_xy", + "source_code": "def set_xy(self, xy):\n xy = np.asarray(xy)\n nverts, _ = xy.shape\n if self._closed:\n if nverts == 1 or (nverts > 1 and (xy[0] != xy[-1]).any()):\n xy = np.concatenate([xy, [xy[0]]])\n elif nverts > 2 and (xy[0] == xy[-1]).all():\n xy = xy[:-1]\n self._path = Path(xy, closed=self._closed)\n self.stale = True", + "docstring": "Set the vertices of the polygon. Parameters ---------- xy : (N, 2) array-like The coordinates of the vertices. Notes ----- Unlike , we do not ignore the last input vertex. 
If the polygon is meant to be closed, and the last point of the polygon is not equal to the first, we assume that the user has not explicitly passed a `` vertex, and add it ourselves.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:set_xy arg:self arg:xy arguments arg arg Assign Call Assign If If BoolOp Compare BoolOp Compare Call Compare Assign Call If BoolOp Compare Call Compare Assign Assign Call Assign" + }, + { + "library": "pandas", + "name": "expand", + "source_code": "def expand(self: CSSResolver, prop: str, value: str) -> Generator[tuple[str, str]]:\n tokens = value.split()\n if len(tokens) == 0 or len(tokens) > 3:\n warnings.warn(f'Too many tokens provided to \"{prop}\" (expected 1-3)', CSSWarning, stacklevel=find_stack_level())\n border_declarations = {f'border{side}-color': 'black', f'border{side}-style': 'none', f'border{side}-width': 'medium'}\n for token in tokens:\n if token.lower() in self.BORDER_STYLES:\n border_declarations[f'border{side}-style'] = token\n elif any((ratio in token.lower() for ratio in self.BORDER_WIDTH_RATIOS)):\n border_declarations[f'border{side}-width'] = token\n else:\n border_declarations[f'border{side}-color'] = token\n yield from self.atomize(border_declarations.items())", + "docstring": "Expand border into color, style, and width tuples Parameters ---------- prop : str CSS property name passed to styler value : str Value passed to styler for property Yields ------ Tuple (str, str): Expanded property, value", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\css.py", + "ast_data": "FunctionDef name:expand arg:self arg:prop arg:value arguments arg arg arg Assign Call If BoolOp Compare Call Compare Call Call Call Assign For If Compare Call Assign If Call Compare Call Assign Assign Call Call" + }, + { + "library": "matplotlib", + "name": "count_overlaps", + "source_code": "def count_overlaps(self, bboxes):\n return count_bboxes_overlapping_bbox(self, np.atleast_3d([np.array(x) for x in bboxes]))", + "docstring": "Count the number of bounding boxes that overlap this one. 
Parameters ---------- bboxes : sequence of", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:count_overlaps arg:self arg:bboxes arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "check", + "source_code": "def check(self, keys) -> bool:\n b64_keys = [self.prefix + self._encode(key) for key in keys]\n kvs = self._try_wait_get(b64_keys, override_timeout=datetime.timedelta(microseconds=1))\n return kvs is not None", + "docstring": "Check if all of the keys are immediately present (without waiting).", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_store.py", + "ast_data": "FunctionDef name:check arg:self arg:keys arguments arg arg Assign Call Assign Call Call Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "lu_reconstruct_assertions", + "source_code": "def lu_reconstruct_assertions(lower_upper, perm, validate_args):\n assertions = []\n message = 'Input `lower_upper` must have at least 2 dimensions.'\n if lower_upper.shape.rank is not None and lower_upper.shape.rank < 2:\n raise ValueError(message)\n elif validate_args:\n assertions.append(check_ops.assert_rank_at_least_v2(lower_upper, rank=2, message=message))\n message = '`rank(lower_upper)` must equal `rank(perm) + 1`'\n if lower_upper.shape.rank is not None and perm.shape.rank is not None:\n if lower_upper.shape.rank != perm.shape.rank + 1:\n raise ValueError(message)\n elif validate_args:\n assertions.append(check_ops.assert_rank(lower_upper, rank=array_ops.rank(perm) + 1, message=message))\n message = '`lower_upper` must be square.'\n if lower_upper.shape[:-2].is_fully_defined():\n if lower_upper.shape[-2] != lower_upper.shape[-1]:\n raise ValueError(message)\n elif validate_args:\n m, n = array_ops.split(array_ops.shape(lower_upper)[-2:], num_or_size_splits=2)\n assertions.append(check_ops.assert_equal(m, n, message=message))\n return assertions", + "docstring": "Returns list of assertions related to assumptions.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linalg_impl.py", + "ast_data": "FunctionDef name:lu_reconstruct_assertions arg:lower_upper arg:perm arg:validate_args arguments arg arg arg Assign Assign If BoolOp Compare Compare Raise Call If Call Call Assign If BoolOp Compare Compare If Compare Raise Call If Call Call Call Assign If Call If Compare Raise Call If Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_states", + "source_code": "def get_states(self):\n stamp_token, num_trees, num_finalized_trees, num_attempted_layers, nodes_range = gen_boosted_trees_ops.boosted_trees_get_ensemble_states(self.resource_handle)\n return (array_ops.identity(stamp_token, name='stamp_token'), array_ops.identity(num_trees, name='num_trees'), array_ops.identity(num_finalized_trees, name='num_finalized_trees'), array_ops.identity(num_attempted_layers, name='num_attempted_layers'), array_ops.identity(nodes_range, name='last_layer_nodes_range'))", + "docstring": "Returns states of the tree ensemble. 
Returns: stamp_token, num_trees, num_finalized_trees, num_attempted_layers and range of the nodes in the latest layer.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\boosted_trees_ops.py", + "ast_data": "FunctionDef name:get_states arg:self arguments arg Assign Call Return return:yes Call Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "_svd", + "source_code": "def _svd(self, array, n_components, n_discard):\n if self.svd_method == 'randomized':\n kwargs = {}\n if self.n_svd_vecs is not None:\n kwargs['n_oversamples'] = self.n_svd_vecs\n u, _, vt = _randomized_svd(array, n_components, random_state=self.random_state, **kwargs)\n elif self.svd_method == 'arpack':\n u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)\n if np.any(np.isnan(vt)):\n A = safe_sparse_dot(array.T, array)\n random_state = check_random_state(self.random_state)\n v0 = random_state.uniform(-1, 1, A.shape[0])\n _, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)\n vt = v.T\n if np.any(np.isnan(u)):\n A = safe_sparse_dot(array, array.T)\n random_state = check_random_state(self.random_state)\n v0 = random_state.uniform(-1, 1, A.shape[0])\n _, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)\n assert_all_finite(u)\n assert_all_finite(vt)\n u = u[:, n_discard:]\n vt = vt[n_discard:]\n return (u, vt.T)", + "docstring": "Returns first left and right singular vectors u and v, discarding the first .", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cluster\\_bicluster.py", + "ast_data": "FunctionDef name:_svd arg:self arg:array arg:n_components arg:n_discard arguments arg arg arg arg If Compare Assign If Compare Assign Assign Call If Compare Assign Call If Call Call Assign Call Assign Call Assign Call Assign Call Assign If Call Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "bench_scikit_tree_classifier", + "source_code": "def bench_scikit_tree_classifier(X, Y):\n from sklearn.tree import DecisionTreeClassifier\n gc.collect()\n tstart = datetime.now()\n clf = DecisionTreeClassifier()\n clf.fit(X, Y).predict(X)\n delta = datetime.now() - tstart\n scikit_classifier_results.append(delta.seconds + delta.microseconds / mu_second)", + "docstring": "Benchmark with scikit-learn decision tree classifier", + "type": "function", + "file_path": "scikit-learn\\benchmarks\\bench_tree.py", + "ast_data": "FunctionDef name:bench_scikit_tree_classifier arg:X arg:Y arguments arg arg Call Assign Call Assign Call Call Call Assign Call Call" + }, + { + "library": "pandas", + "name": "_truncate_horizontally", + "source_code": "def _truncate_horizontally(self) -> None:\n assert self.max_cols_fitted is not None\n col_num = self.max_cols_fitted // 2\n if col_num >= 1:\n _len = len(self.tr_frame.columns)\n _slice = np.hstack([np.arange(col_num), np.arange(_len - col_num, _len)])\n self.tr_frame = self.tr_frame.iloc[:, _slice]\n if isinstance(self.formatters, (list, tuple)):\n self.formatters = [*self.formatters[:col_num], *self.formatters[-col_num:]]\n else:\n col_num = cast(int, self.max_cols)\n self.tr_frame = self.tr_frame.iloc[:, :col_num]\n self.tr_col_num: int = col_num", + "docstring": "Remove columns, which are not to be displayed and adjust formatters. 
Attributes affected: - tr_frame - formatters - tr_col_num", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\format.py", + "ast_data": "FunctionDef name:_truncate_horizontally arg:self arguments arg Compare Assign If Compare Assign Call Assign Call Call Call Assign If Call Assign Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "_update_docstring", + "source_code": "def _update_docstring(old_str, append_str):\n old_str = old_str or ''\n old_str_lines = old_str.split('\\n')\n append_str = '\\n'.join((' %s' % line for line in append_str.split('\\n')))\n has_args_ix = [ix for ix, line in enumerate(old_str_lines) if line.strip().lower() == 'args:']\n if has_args_ix:\n final_args_ix = has_args_ix[-1]\n return '\\n'.join(old_str_lines[:final_args_ix]) + '\\n\\n' + append_str + '\\n\\n' + '\\n'.join(old_str_lines[final_args_ix:])\n else:\n return old_str + '\\n\\n' + append_str", + "docstring": "Update old_str by inserting append_str just before the \"Args:\" section.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py", + "ast_data": "FunctionDef name:_update_docstring arg:old_str arg:append_str arguments arg arg Assign BoolOp Assign Call Assign Call Call Assign Call Compare Call Call If Assign Return return:yes Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_iter_collection_raw_paths", + "source_code": "def _iter_collection_raw_paths(self, master_transform, paths, all_transforms):\n Npaths = len(paths)\n Ntransforms = len(all_transforms)\n N = max(Npaths, Ntransforms)\n if Npaths == 0:\n return\n transform = transforms.IdentityTransform()\n for i in range(N):\n path = paths[i % Npaths]\n if Ntransforms:\n transform = Affine2D(all_transforms[i % Ntransforms])\n yield (path, transform + master_transform)", + "docstring": "Helper method (along with ) to implement in a memory-efficient manner. This method yields all of the base path/transform combinations, given a master transform, a list of paths and list of transforms. The arguments should be exactly what is passed in to . 
The backend should take each yielded path and transform and create an object that can be referenced (reused) later.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:_iter_collection_raw_paths arg:self arg:master_transform arg:paths arg:all_transforms arguments arg arg arg arg Assign Call Assign Call Assign Call If Compare Return return:no Assign Call For Call Assign If Assign Call" + }, + { + "library": "tensorflow", + "name": "flat_values", + "source_code": "@property\ndef flat_values(self):\n rt_values = self.values\n while isinstance(rt_values, RaggedTensorValue):\n rt_values = rt_values.values\n return rt_values", + "docstring": "The innermost array for this ragged tensor value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_value.py", + "ast_data": "FunctionDef name:flat_values arg:self arguments arg Assign While Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "parse_example_spec", + "source_code": "@property\ndef parse_example_spec(self):\n return self.categorical_column.parse_example_spec", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes" + }, + { + "library": "cherrypy", + "name": "restart", + "source_code": "def restart(self):\n self.execv = True\n self.exit()", + "docstring": "Restart the process (may close connections). This method does not restart the process from the calling thread; instead, it stops the bus and asks the main thread to call execv.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\wspbus.py", + "ast_data": "FunctionDef name:restart arg:self arguments arg Assign Call" + }, + { + "library": "scipy", + "name": "set_global_backend", + "source_code": "def set_global_backend(backend, coerce=False, only=False, *, try_last=False):\n _uarray.set_global_backend(backend, coerce, only, try_last)", + "docstring": "This utility method replaces the default backend for permanent use. It will be tried in the list of backends automatically, unless the `set_backend`, the global backend is tried after registered backends. See Also -------- set_backend: A context manager that allows setting of backends. skip_backend: A context manager that allows skipping of backends.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py", + "ast_data": "FunctionDef name:set_global_backend arg:backend arg:coerce arg:only arguments arg arg arg arg Call" + }, + { + "library": "pandas", + "name": "_construct_result", + "source_code": "def _construct_result(self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable, other: AnyArrayLike | DataFrame) -> Series | tuple[Series, Series]:\n if isinstance(result, tuple):\n res1 = self._construct_result(result[0], name=name, other=other)\n res2 = self._construct_result(result[1], name=name, other=other)\n assert isinstance(res1, Series)\n assert isinstance(res2, Series)\n return (res1, res2)\n dtype = getattr(result, 'dtype', None)\n out = self._constructor(result, index=self.index, dtype=dtype, copy=False)\n out = out.__finalize__(self)\n out = out.__finalize__(other)\n out.name = name\n return out", + "docstring": "Construct an appropriately-labelled Series from the result of an op. 
Parameters ---------- result : ndarray or ExtensionArray name : Label other : Series, DataFrame or array-like Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series.", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:_construct_result arg:self arg:result arg:name arg:other arguments arg arg arg arg If Call Assign Call Assign Call Call Call Return return:yes Assign Call Assign Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "Ones", + "source_code": "class Ones(Initializer):\n\n def __call__(self, shape, dtype=None, **kwargs):\n _validate_kwargs(self.__class__.__name__, kwargs)\n dtype = _get_dtype(dtype)\n if not dtype.is_numpy_compatible or dtype == dtypes.string:\n raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype)\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n return array_ops.ones(shape, dtype)", + "docstring": "Initializer that generates tensors initialized to 1. Also available via the shortcut function . Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.Ones() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.Ones() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py", + "ast_data": "ClassDef name:Ones FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If BoolOp Compare Raise Call If Compare Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_hatch_color", + "source_code": "def set_hatch_color(self, hatch_color):\n self._hatch_color = hatch_color", + "docstring": "Set the hatch color.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:set_hatch_color arg:self arg:hatch_color arguments arg arg Assign" + }, + { + "library": "numpy", + "name": "_deprecate_argsort_axis", + "source_code": "def _deprecate_argsort_axis(arr):\n if arr.ndim <= 1:\n return -1\n else:\n warnings.warn('In the future the default for argsort will be axis=-1, not the current None, to match its documentation and np.argsort. Explicitly pass -1 or None to silence this warning.', MaskedArrayFutureWarning, stacklevel=3)\n return None", + "docstring": "Adjust the axis passed to argsort, warning if necessary Parameters ---------- arr The array which argsort was called on np.ma.argsort has a long-term bug where the default of the axis argument is wrong (gh-8701), which now must be kept for backwards compatibility. Thankfully, this only makes a difference when arrays are 2- or more- dimensional, so we only need a warning then.", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:_deprecate_argsort_axis arg:arr arguments arg If Compare Return return:yes Call Return return:no" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n check_is_fitted(self)\n K_star = self.kernel_(self.X_train_, X)\n f_star = K_star.T.dot(self.y_train_ - self.pi_)\n return np.where(f_star > 0, self.classes_[1], self.classes_[0])", + "docstring": "Perform classification on an array of test vectors X. 
Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated for classification. Returns ------- C : ndarray of shape (n_samples,) Predicted target values for X, values are from ``", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\_gpc.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Assign Call Return return:yes Call Compare" + }, + { + "library": "tensorflow", + "name": "EvalOutput", + "source_code": "class EvalOutput(_SupervisedOutput):\n\n def _get_signature_def_fn(self):\n return unexported_signature_utils.supervised_eval_signature_def", + "docstring": "Represents the output of a supervised eval process. This class generates the appropriate signature def for exporting eval output by type-checking and wrapping loss, predictions, and metrics values.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py", + "ast_data": "ClassDef name:EvalOutput FunctionDef name:_get_signature_def_fn arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "__call__", + "source_code": "def __call__(self, *inputs: Any, input_names_to_handle: Optional[List[Any]]=None, output_type: str='tensor', **kwargs: Any) -> Any:\n if not self._disable_features:\n decorated_forward = self.convert_input_output(input_names_to_handle=input_names_to_handle, output_type=output_type)(super(ImageSequential, self).__call__)\n _output_image = decorated_forward(*inputs, **kwargs)\n if len(inputs) == 1 and isinstance(inputs[0], dict):\n original_keys, in_data_keys, inputs, invalid_data = self._preproc_dict_data(inputs[0])\n else:\n in_data_keys = kwargs.get('data_keys', self.data_keys)\n data_keys = self.transform_op.preproc_datakeys(in_data_keys)\n if len(data_keys) > 1 and DataKey.INPUT in data_keys:\n idx = data_keys.index(DataKey.INPUT)\n if output_type == 'tensor':\n self._output_image = _output_image\n if isinstance(_output_image, dict):\n self._output_image[original_keys[idx]] = _output_image[original_keys[idx]]\n else:\n self._output_image[idx] = _output_image[idx]\n elif isinstance(_output_image, dict):\n self._output_image[original_keys[idx]] = _output_image[original_keys[idx]]\n else:\n self._output_image[idx] = _output_image[idx]\n else:\n self._output_image = _output_image\n else:\n _output_image = super(ImageSequential, self).__call__(*inputs, **kwargs)\n return _output_image", + "docstring": "Overwrite the __call__ function to handle various inputs. Args: inputs: Inputs to operate on. input_names_to_handle: List of input names to convert, if None, handle all inputs. output_type: Desired output type ('tensor', 'numpy', or 'pil'). kwargs: Additional arguments. 
Returns: Callable: Decorated function with converted input and output types.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\augment.py", + "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg arg arg If Assign Call Call Call Assign Call If BoolOp Compare Call Call Assign Call Assign Call Assign Call If BoolOp Compare Call Compare Assign Call If Compare Assign If Call Assign Assign If Call Assign Assign Assign Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "can_zoom", + "source_code": "def can_zoom(self):\n return True", + "docstring": "Return whether this Axes supports the zoom box button functionality.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:can_zoom arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "get_default_static_quant_module_mappings", + "source_code": "def get_default_static_quant_module_mappings() -> dict[Callable, Any]:\n return copy.deepcopy(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS)", + "docstring": "Get module mapping for post training static quantization", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py", + "ast_data": "FunctionDef name:get_default_static_quant_module_mappings arguments Return return:yes Call" + }, + { + "library": "pytorch", + "name": "Flatten", + "source_code": "@dataclass\nclass Flatten(DimSpec):\n input_dims: Sequence[DimSpec]\n\n @classmethod\n def new(cls, dims: Sequence[DimSpec]) -> DimSpec:\n if len(dims) == 0:\n return Singleton()\n elif len(dims) == 1:\n return dims[0]\n else:\n return Flatten(dims)\n\n def inputs(self) -> Iterable[DimSpec]:\n return self.input_dims", + "docstring": "Flatten a set of input dimensions, ensuring right-most adjacent elements remain adjacent in the output.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_view_ops.py", + "ast_data": "ClassDef name:Flatten FunctionDef name:new arg:cls arg:dims arguments arg arg If Compare Call Return return:yes Call If Compare Call Return return:yes Return return:yes Call FunctionDef name:inputs arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_colocation_dict", + "source_code": "@property\ndef _colocation_dict(self) -> dict[str, traceable_stack.TraceableObject]:\n locations_dict = self._colocation_code_locations or {}\n return locations_dict.copy()", + "docstring": "Code locations for colocation context managers active at op creation. This property will return a dictionary for which the keys are nodes with which this Operation is colocated, and for which the values are traceable_stack.TraceableObject instances. The TraceableObject instances record the location of the relevant colocation context manager but have the \"obj\" field set to None to prevent leaking private data. 
For example, suppose file_a contained these lines: file_a.py: 14: node_a = tf.constant(3, name='NODE_A') 15: with tf.compat.v1.colocate_with(node_a): 16: node_b = tf.constant(4, name='NODE_B') Then a TraceableObject t_obj representing the colocation context manager would have these member values: t_obj.obj -> None t_obj.filename = 'file_a.py' t_obj.lineno = 15 and node_b.op._colocation_dict would return the dictionary { 'NODE_A': t_obj } Returns: {str: traceable_stack.TraceableObject} as per this method's description, above.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_colocation_dict arg:self arguments arg Assign BoolOp Return return:yes Call" + }, + { + "library": "numpy", + "name": "nmask", + "source_code": "def nmask(x):\n if x is masked:\n return True\n return getmask(x)", + "docstring": "Returns the mask, True if ``.", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:nmask arg:x arguments arg If Compare Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_initialize_uninitialized_variables", + "source_code": "def _initialize_uninitialized_variables(self, initializers):\n if not initializers:\n return\n var_is_initialized = _evaluate_var_is_initialized([v for v, _ in initializers])\n\n def initialize_variables():\n op_map = object_identity.ObjectIdentityDictionary()\n inits = []\n for (v, init), is_initialized in zip(initializers, var_is_initialized):\n with ops.init_scope():\n if is_initialized:\n continue\n inits.append(init)\n if inits:\n op_map = lift_to_graph.lift_to_graph(inits, ops.get_default_graph(), op_map=op_map)\n for (v, init), is_initialized in zip(initializers, var_is_initialized):\n with ops.init_scope():\n if is_initialized:\n continue\n v.assign(op_map[init], read_value=False)\n with ops.init_scope():\n options = tracing_compilation.TracingOptions(initialize_variables, 'initialize_variables', autograph=False)\n return tracing_compilation.call_function(tracing_options=options)", + "docstring": "Make and call a which initializes variables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py", + "ast_data": "FunctionDef name:_initialize_uninitialized_variables arg:self arg:initializers arguments arg arg If Return return:no Assign Call FunctionDef name:initialize_variables arguments Assign Call Assign For Call With Call If Call If Assign Call Call For Call With Call If Call With Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_validate_kwargs", + "source_code": "def _validate_kwargs(self, kwargs):\n if kwargs.get('write_grads', False):\n logging.warning('`write_grads` will be ignored in TensorFlow 2.0 for the `TensorBoard` Callback.')\n if kwargs.get('batch_size', False):\n logging.warning('`batch_size` is no longer needed in the `TensorBoard` Callback and will be ignored in TensorFlow 2.0.')\n if kwargs.get('embeddings_layer_names', False):\n logging.warning('`embeddings_layer_names` is not supported in TensorFlow 2.0. Instead, all `Embedding` layers will be visualized.')\n if kwargs.get('embeddings_data', False):\n logging.warning('`embeddings_data` is not supported in TensorFlow 2.0. 
Instead, all `Embedding` variables will be visualized.')\n unrecognized_kwargs = set(kwargs.keys()) - {'write_grads', 'embeddings_layer_names', 'embeddings_data', 'batch_size'}\n if unrecognized_kwargs:\n raise ValueError('Unrecognized arguments in `TensorBoard` Callback: ' + str(unrecognized_kwargs))", + "docstring": "Handle arguments were supported in V1.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:_validate_kwargs arg:self arg:kwargs arguments arg arg If Call Call If Call Call If Call Call If Call Call Assign Call Call If Raise Call Call" + }, + { + "library": "scikit-learn", + "name": "fit_transform", + "source_code": "def fit_transform(self, X, y=None):\n return self.fit(X).transform(X)", + "docstring": "Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training set. y : Ignored Not used, present for API consistency by convention. Returns ------- Xt : sparse matrix of shape (n_samples, n_samples) Xt[i, j] is assigned the weight of edge that connects i to j. Only the neighbors have an explicit value. The diagonal is always explicit. The matrix is of CSR format.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neighbors\\_graph.py", + "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "rsample", + "source_code": "def rsample(self, sample_shape: _size=torch.Size(), max_try_correction=None) -> Tensor:\n if max_try_correction is None:\n max_try_correction = 3 if torch._C._get_tracing_state() else 10\n sample_shape = torch.Size(sample_shape)\n sample = self._bartlett_sampling(sample_shape)\n is_singular = self.support.check(sample)\n if self._batch_shape:\n is_singular = is_singular.amax(self._batch_dims)\n if torch._C._get_tracing_state():\n for _ in range(max_try_correction):\n sample_new = self._bartlett_sampling(sample_shape)\n sample = torch.where(is_singular, sample_new, sample)\n is_singular = ~self.support.check(sample)\n if self._batch_shape:\n is_singular = is_singular.amax(self._batch_dims)\n elif is_singular.any():\n warnings.warn('Singular sample detected.')\n for _ in range(max_try_correction):\n sample_new = self._bartlett_sampling(is_singular[is_singular].shape)\n sample[is_singular] = sample_new\n is_singular_new = ~self.support.check(sample_new)\n if self._batch_shape:\n is_singular_new = is_singular_new.amax(self._batch_dims)\n is_singular[is_singular.clone()] = is_singular_new\n if not is_singular.any():\n break\n return sample", + "docstring": ".. warning:: In some cases, sampling algorithm based on Bartlett decomposition may return singular matrix samples. Several tries to correct singular samples are performed by default, but it may end up returning singular matrix samples. Singular samples may return values in . 
In those cases, the user should validate the samples and either fix the value of or adjust value for argument in accordingly.", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\wishart.py", + "ast_data": "FunctionDef name:rsample arg:self arg:sample_shape arg:max_try_correction arguments arg arg arg Call If Compare Assign Call Assign Call Assign Call Assign Call If Assign Call If Call For Call Assign Call Assign Call Assign Call If Assign Call If Call Call For Call Assign Call Assign Assign Call If Assign Call Assign Call If Call Return return:yes" + }, + { + "library": "pandas", + "name": "register_vcs_handler", + "source_code": "def register_vcs_handler(vcs, method):\n\n def decorate(f):\n if vcs not in HANDLERS:\n HANDLERS[vcs] = {}\n HANDLERS[vcs][method] = f\n return f\n return decorate", + "docstring": "Create decorator to mark a method as the handler of a VCS.", + "type": "function", + "file_path": "pandas\\pandas\\_version.py", + "ast_data": "FunctionDef name:register_vcs_handler arg:vcs arg:method arguments arg arg FunctionDef name:decorate arg:f arguments arg If Compare Assign Assign Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "map", + "source_code": "def map(self, mapper, na_action: Literal['ignore'] | None=None):\n return map_array(self, mapper, na_action=na_action)", + "docstring": "Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NA values, without passing them to the mapping correspondence. If 'ignore' is not supported, a `` should be raised. Returns ------- Union[ndarray, Index, ExtensionArray] The output of the mapping function applied to the array. If the function returns a tuple with more than one element a MultiIndex will be returned.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:map arg:self arg:mapper arg:na_action arguments arg arg arg Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "next", + "source_code": "def next(self):\n data = self.rfile.next()\n self.bytes_read += len(data)\n return data", + "docstring": "Return next portion of bytes from the iterated file.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py", + "ast_data": "FunctionDef name:next arg:self arguments arg Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "ObjectIdentityDictionary", + "source_code": "class ObjectIdentityDictionary(collections_abc.MutableMapping):\n __slots__ = ['_storage']\n\n def __init__(self):\n self._storage = {}\n\n def _wrap_key(self, key):\n return _ObjectIdentityWrapper(key)\n\n def __getitem__(self, key):\n return self._storage[self._wrap_key(key)]\n\n def __setitem__(self, key, value):\n self._storage[self._wrap_key(key)] = value\n\n def __delitem__(self, key):\n del self._storage[self._wrap_key(key)]\n\n def __len__(self):\n return len(self._storage)\n\n def __iter__(self):\n for key in self._storage:\n yield key.unwrapped\n\n def __repr__(self):\n return 'ObjectIdentityDictionary(%s)' % repr(self._storage)", + "docstring": "A mutable mapping data structure which compares using \"is\". 
This is necessary because we have trackable objects (_ListWrapper) which have behavior identical to built-in Python lists (including being unhashable and comparing based on the equality of their contents by default).", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\util\\object_identity.py", + "ast_data": "ClassDef name:ObjectIdentityDictionary Assign FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:_wrap_key arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Assign Call FunctionDef name:__delitem__ arg:self arg:key arguments arg arg Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg For FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "is_consistent", + "source_code": "@compatibility(is_backward_compatible=False)\ndef is_consistent(t1, t2):\n if t1 == t2:\n return True\n if t1 == Dyn or t2 == Dyn or isinstance(t1, Var) or isinstance(t2, Var):\n return True\n if isinstance(t1, TensorType) and isinstance(t2, TensorType):\n return len(t1.__args__) == len(t2.__args__) and all((is_consistent(elem1, elem2) for elem1, elem2 in zip(t1.__args__, t2.__args__)))\n else:\n return False", + "docstring": "A binary relation denoted by ~ that determines if t1 is consistent with t2. The relation is reflexive, symmetric but not transitive. returns True if t1 and t2 are consistent and False otherwise. Example: Dyn ~ TensorType((1,2,3)) int ~ Dyn int ~ int TensorType((1,Dyn,3)) ~ TensorType((1,2,3))", + "type": "function", + "file_path": "pytorch\\torch\\fx\\tensor_type.py", + "ast_data": "FunctionDef name:is_consistent arg:t1 arg:t2 arguments arg arg If Compare Return return:yes If BoolOp Compare Compare Call Call Return return:yes If BoolOp Call Call Return return:yes BoolOp Compare Call Call Call Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "squared_norm", + "source_code": "def squared_norm(x):\n x = np.ravel(x, order='K')\n if np.issubdtype(x.dtype, np.integer):\n warnings.warn('Array type is integer, np.dot may overflow. Data should be float type to avoid this issue', UserWarning)\n return np.dot(x, x)", + "docstring": "Squared Euclidean or Frobenius norm of x. Faster than norm(x) ** 2. Parameters ---------- x : array-like The input array which could be either be a vector or a 2 dimensional array. 
Returns ------- float The Euclidean norm when x is a vector, the Frobenius norm when x is a matrix (2-d array).", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\extmath.py", + "ast_data": "FunctionDef name:squared_norm arg:x arguments arg Assign Call If Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "to_onnx", + "source_code": "def to_onnx(self, onnx_name: Optional[str]=None, include_pre_and_post_processor: bool=True, save: bool=True, additional_metadata: Optional[list[tuple[str, str]]]=None, **kwargs: Any) -> onnx.ModelProto:\n if onnx_name is None:\n onnx_name = f'kornia_{self.name}.onnx'\n if not include_pre_and_post_processor:\n self._add_metadata(self.model, additional_metadata)\n if save:\n self._export(self.model, onnx_name, **kwargs)\n return self.model\n self._add_metadata(self._combined_op, additional_metadata)\n if save:\n self._export(self._combined_op, onnx_name, **kwargs)\n return self._combined_op", + "docstring": "Export a depth estimation model to ONNX format. Args: onnx_name: The name of the output ONNX file. If not provided, a default name in the format \"Kornia-.onnx\" will be used. include_pre_and_post_processor: Whether to include the pre-processor and post-processor in the exported model. save: If to save the model or load it. additional_metadata: Additional metadata to add to the ONNX model. kwargs: Additional arguments to convert to onnx.", + "type": "method", + "file_path": "kornia\\kornia\\models\\_hf_models\\hf_onnx_community.py", + "ast_data": "FunctionDef name:to_onnx arg:self arg:onnx_name arg:include_pre_and_post_processor arg:save arg:additional_metadata arguments arg arg arg arg arg arg If Compare Assign If Call If Call Return return:yes Call If Call Return return:yes" + }, + { + "library": "pygame", + "name": "array3d", + "source_code": "def array3d(surface):\n width, height = surface.get_size()\n array = numpy.empty((width, height, 3), numpy.uint8)\n surface_to_array(array, surface)\n return array", + "docstring": "pygame.surfarray.array3d(Surface): return array copy pixels into a 3d array Copy the pixels from a Surface into a 3D array. The bit depth of the surface will control the size of the integer values, and will work for any type of pixel format. This function will temporarily lock the Surface as pixels are copied (see the Surface.lock - lock the Surface memory for pixel access method).", + "type": "function", + "file_path": "pygame\\src_py\\surfarray.py", + "ast_data": "FunctionDef name:array3d arg:surface arguments arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "replace_flat_tensors_for_gradients", + "source_code": "def replace_flat_tensors_for_gradients(xs, flat_grads):\n xs_structure = [_get_tensors_for_gradient(x) for x in xs]\n grads = nest.pack_sequence_as(xs_structure, flat_grads)\n return [_replace_tensors_for_gradient(x, grad) for x, grad in zip(xs, grads)]", + "docstring": "Replaces Tensors that should be differentiated in with . Args: xs: A list of s or s. flat_grads: A list of . 
Returns: A list of or .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor_gradient.py", + "ast_data": "FunctionDef name:replace_flat_tensors_for_gradients arg:xs arg:flat_grads arguments arg arg Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_conv_add_relu_extra_inputs_getter_left", + "source_code": "def _conv_add_relu_extra_inputs_getter_left(pattern):\n _relu, add_pattern = pattern\n _, _conv, extra_input = add_pattern\n return [extra_input]", + "docstring": "get inputs pattern for extra inputs, inputs for root node are assumed to be copied over from root node to the fused node", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\onednn.py", + "ast_data": "FunctionDef name:_conv_add_relu_extra_inputs_getter_left arg:pattern arguments arg Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "WriteSourceFile", + "source_code": "def WriteSourceFile(self, source_file):\n debug_event = debug_event_pb2.DebugEvent(source_file=source_file)\n self._EnsureTimestampAdded(debug_event)\n _pywrap_debug_events_writer.WriteSourceFile(self._dump_root, debug_event)", + "docstring": "Write a SourceFile proto with the writer. Args: source_file: A SourceFile proto, describing the content of a source file involved in the execution of the debugged TensorFlow program.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_writer.py", + "ast_data": "FunctionDef name:WriteSourceFile arg:self arg:source_file arguments arg arg Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "get_initialization_function", + "source_code": "def get_initialization_function(self, *args, **kwargs):\n with self._lock:\n if self._variable_creation_config is not None:\n raise RuntimeError('get_initialization_function cannot be called after the function has been used')\n initializers = []\n self._initialize(args, kwargs, add_initializers_to=initializers)\n\n def initialize_variables():\n for v, init in initializers:\n v.assign(lift_to_graph.lift_to_graph([init], ops.get_default_graph())[init], read_value=False)\n options = tracing_compilation.TracingOptions(initialize_variables, 'initialize_variables')\n return tracing_compilation.trace_function(tracing_options=options)", + "docstring": "Returns a which initializes this function's variables. Requires that this function hasn't been accessed yet through either calling it or calling get_concrete_function. Fails if we cannot build an initializer function which does not depend on the concrete values of the inputs to this function. Note that running this function will overwrite any values currently assigned to variables, for example restores from a checkpoint. Args: *args: arguments to the underlying python callable. **kwargs: keyword arguments to the python callable. Returns: A object which initializes the variables of this function. 
Raises: RuntimeError: if called after the variables have been initialized.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py", + "ast_data": "FunctionDef name:get_initialization_function arg:self arguments arg arg arg With If Compare Raise Call Assign Call FunctionDef name:initialize_variables arguments For Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_color", + "source_code": "def set_color(self, c):\n self.set_edgecolor(c)\n self.stale = True", + "docstring": "Set the edgecolor. Parameters ---------- c : :mpltype: Notes ----- This method does not modify the facecolor (which defaults to \"none\"), unlike the method defined in the parent class. Use to set the facecolor.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\spines.py", + "ast_data": "FunctionDef name:set_color arg:self arg:c arguments arg arg Call Assign" + }, + { + "library": "scikit-learn", + "name": "_find_permutation", + "source_code": "def _find_permutation(a, b):\n t = np.argsort(a)\n u = np.argsort(b)\n u_ = _inverse_permutation(u)\n return t[u_]", + "docstring": "Find the permutation from a to b.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\datasets\\_rcv1.py", + "ast_data": "FunctionDef name:_find_permutation arg:a arg:b arguments arg arg Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self, fget, fset=None):\n self.fget = fget\n self.fset = fset", + "docstring": "Initialize a class property descriptor. Instantiated by ``.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_helper.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:fget arg:fset arguments arg arg arg Assign Assign" + }, + { + "library": "pytorch", + "name": "_no_hook", + "source_code": "@contextmanager\ndef _no_hook(module: nn.Module, user_ctx: Optional[AbstractContextManager]=None):\n with user_ctx if user_ctx else nullcontext():\n orig_enable_hook = checkpoint.state(module).enable_hook\n checkpoint.state(module).enable_hook = False\n try:\n yield\n finally:\n checkpoint.state(module).enable_hook = orig_enable_hook", + "docstring": "Disable hooks installed by checkpoint to avoid unintentional recursion during backward recomputation.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_composable\\checkpoint_activation.py", + "ast_data": "FunctionDef name:_no_hook arg:module arg:user_ctx arguments arg arg With Call Assign Call Assign Call Try Assign Call" + }, + { + "library": "pytorch", + "name": "increment_step", + "source_code": "@classmethod\ndef increment_step(cls, requester: str) -> int:\n if requester not in cls._step_dict:\n cls.init_step_count(requester)\n cls._step_dict[requester] += 1\n new_step = max(cls._step_dict.values())\n if new_step > cls._current_step:\n delta = new_step - cls._current_step\n if delta > 1:\n warn(f'Profiler step count has increased more than 1 - current_step = {cls._current_step} step dict = {cls._step_dict}')\n for _ in range(0, delta):\n _kineto_step()\n cls._current_step = new_step\n return cls._current_step", + "docstring": "Increments the step count for the requester. 
Additionally if the max over all step counts has incremented then trigger the _kineto_step() returns global step count", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\profiler.py", + "ast_data": "FunctionDef name:increment_step arg:cls arg:requester arguments arg arg If Compare Call Assign Call Call If Compare Assign If Compare Call For Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "MatchContext", + "source_code": "class MatchContext:\n outputs: list[Optional[PatternExpr]]\n pattern_to_node: dict[PatternExpr, Optional[torch.fx.Node]]\n graph: torch.fx.Graph\n exclusive_node_set: list[NodeOrConstant]\n\n def __init__(self, outputs: list[Optional[PatternExpr]], pattern_to_node: Optional[dict[PatternExpr, torch.fx.Node]]=None, *, graph: torch.fx.Graph) -> None:\n self.outputs = outputs\n self.pattern_to_node = {} if pattern_to_node is None else dict(pattern_to_node)\n self.graph = graph\n self.exclusive_node_set = []\n\n def match(self, pattern: PatternExpr, node: NodeOrConstant) -> MatchResult:\n if pattern in self.pattern_to_node:\n if self.pattern_to_node[pattern] == node:\n return Match(self, pattern)\n else:\n return FailedMatch('repeated pattern differs')\n m = pattern._match(node, self)\n assert pattern not in self.pattern_to_node\n self.pattern_to_node[pattern] = node if m else None\n return m\n\n def filter_multi_user_patterns(self) -> dict[PatternExpr, torch.fx.Node]:\n return {pattern: node for pattern, node in self.pattern_to_node.items() if pattern.has_multiple_users() and node is not None}", + "docstring": "Internal state needed while running PatternExpr._match().", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py", + "ast_data": "ClassDef name:MatchContext FunctionDef name:__init__ arg:self arg:outputs arg:pattern_to_node arguments arg arg arg arg Assign Assign Compare Call Assign Assign FunctionDef name:match arg:self arg:pattern arg:node arguments arg arg arg If Compare If Compare Return return:yes Call Return return:yes Call Assign Call Compare Assign Return return:yes FunctionDef name:filter_multi_user_patterns arg:self arguments arg Return return:yes Call BoolOp Call Compare" + }, + { + "library": "django", + "name": "filter", + "source_code": "def filter(self, name=None, filter_func=None, **flags):\n if name is None and filter_func is None:\n\n def dec(func):\n return self.filter_function(func, **flags)\n return dec\n elif name is not None and filter_func is None:\n if callable(name):\n return self.filter_function(name, **flags)\n else:\n\n def dec(func):\n return self.filter(name, func, **flags)\n return dec\n elif name is not None and filter_func is not None:\n self.filters[name] = filter_func\n for attr in ('expects_localtime', 'is_safe', 'needs_autoescape'):\n if attr in flags:\n value = flags[attr]\n setattr(filter_func, attr, value)\n setattr(unwrap(filter_func), attr, value)\n filter_func._filter_name = name\n return filter_func\n else:\n raise ValueError('Unsupported arguments to Library.filter: (%r, %r)' % (name, filter_func))", + "docstring": "Register a callable as a template filter. 
Example: @register.filter def lower(value): return value.lower()", + "type": "method", + "file_path": "django\\django\\template\\library.py", + "ast_data": "FunctionDef name:filter arg:self arg:name arg:filter_func arguments arg arg arg arg If BoolOp Compare Compare FunctionDef name:dec arg:func arguments arg Return return:yes Call Return return:yes If BoolOp Compare Compare If Call Return return:yes Call FunctionDef name:dec arg:func arguments arg Return return:yes Call Return return:yes If BoolOp Compare Compare Assign For If Compare Assign Call Call Call Assign Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "check_equal", + "source_code": "def check_equal(self, other: ShapeEnv) -> None:\n non_state_variable_names = ('counter', 'log', 'var_to_stack', 'fx_node_cache', 'graph', 'validator', 'check_recorded_events', 'should_record_events', 'is_recording', 'tracked_fakes', 'events', 'source_name_to_debug_name', '_prev_cache_key', '_version_counter', 'dim_constraints', 'var_to_range_sloc', 'replacements_slocs', '_resimplify_floor_div_axioms', '_expr_sym_node_id', 'specialization_stacks')\n\n def map_value(key: str, value: Any) -> Any:\n if key in ('unbacked_symfloat_counter', 'unbacked_symint_counter'):\n from copy import copy\n return next(copy(value))\n elif key == 'guards':\n return [g.expr for g in value]\n elif key == 'deferred_runtime_asserts':\n return {s: [ra.expr for ra in ras] for s, ras in value.items()}\n elif key == 'name_to_node':\n return set(value.keys())\n elif key in ('symbol_guard_counter', 'pending_fresh_unbacked_symbols', 'fake_tensor_cache'):\n return None\n return value\n shape_env_check_state_equal(self, other, non_state_variable_names, map_value)", + "docstring": "Compare another ShapeEnv for equivalence", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:check_equal arg:self arg:other arguments arg arg Assign FunctionDef name:map_value arg:key arg:value arguments arg arg If Compare Return return:yes Call Call If Compare Return return:yes If Compare Return return:yes Call If Compare Return return:yes Call Call If Compare Return return:no Return return:yes Call" + }, + { + "library": "django", + "name": "__get__", + "source_code": "def __get__(self, instance, cls=None):\n if instance is None:\n return self\n if isinstance(instance._route, str):\n instance.__dict__['regex'] = re.compile(instance._regex)\n return instance.__dict__['regex']\n language_code = get_language()\n if language_code not in instance._regex_dict:\n instance._regex_dict[language_code] = re.compile(_route_to_regex(str(instance._route), instance._is_endpoint)[0])\n return instance._regex_dict[language_code]", + "docstring": "Return a compiled regular expression based on the active language.", + "type": "method", + "file_path": "django\\django\\urls\\resolvers.py", + "ast_data": "FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg If Compare Return return:yes If Call Assign Call Return return:yes Assign Call If Compare Assign Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "pep440_split_post", + "source_code": "def pep440_split_post(ver):\n vc = str.split(ver, '.post')\n return (vc[0], int(vc[1] or 0) if len(vc) == 2 else None)", + "docstring": "Split pep440 version string at the post-release segment. 
Returns the release segments before the post-release and the post-release version number (or -1 if no post-release segment is present).", + "type": "function", + "file_path": "pandas\\pandas\\_version.py", + "ast_data": "FunctionDef name:pep440_split_post arg:ver arguments arg Assign Call Return return:yes Compare Call Call BoolOp" + }, + { + "library": "numpy", + "name": "__call__", + "source_code": "def __call__(self, x):\n with np.errstate(invalid='ignore'):\n return umath.less(x, self.critical_value)", + "docstring": "Executes the call behavior.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg With Call Return return:yes Call" + }, + { + "library": "authlib", + "name": "generate_user_info", + "source_code": "def generate_user_info(self, user, scope: str) -> UserInfo:\n raise NotImplementedError()", + "docstring": "Generate a :class: object for an user:: def generate_user_info(self, user, scope: str) -> UserInfo: return UserInfo( given_name=user.given_name, family_name=user.last_name, email=user.email, ... ).filter(scope) This method must be implemented by developers.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\core\\userinfo.py", + "ast_data": "FunctionDef name:generate_user_info arg:self arg:user arg:scope arguments arg arg arg Raise Call" + }, + { + "library": "authlib", + "name": "fetch_access_token", + "source_code": "def fetch_access_token(self, redirect_uri=None, **kwargs):\n metadata = self.load_server_metadata()\n token_endpoint = self.access_token_url or metadata.get('token_endpoint')\n with self._get_oauth_client(**metadata) as client:\n if redirect_uri is not None:\n client.redirect_uri = redirect_uri\n params = {}\n if self.access_token_params:\n params.update(self.access_token_params)\n params.update(kwargs)\n token = client.fetch_token(token_endpoint, **params)\n return token", + "docstring": "Fetch access token in the final step. :param redirect_uri: Callback or Redirect URI that is used in previous :meth:. :param kwargs: Extra parameters to fetch access token. 
:return: A token dict.", + "type": "method", + "file_path": "authlib\\authlib\\integrations\\base_client\\sync_app.py", + "ast_data": "FunctionDef name:fetch_access_token arg:self arg:redirect_uri arguments arg arg arg Assign Call Assign BoolOp Call With Call If Compare Assign Assign If Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_make_output_composite_tensors_match", + "source_code": "def _make_output_composite_tensors_match(op_type, branch_graphs):\n assert branch_graphs\n branch_outputs = [g.structured_outputs for g in branch_graphs]\n outputs_per_branch = list((len(outs) for outs in branch_outputs))\n assert len(set(outputs_per_branch)) == 1, outputs_per_branch\n for output_idx, branch_outs in enumerate(zip(*branch_outputs)):\n if len(set((type(out) for out in branch_outs))) == 1:\n continue\n if not any((isinstance(out, indexed_slices.IndexedSlices) for out in branch_outs)):\n continue\n for branch_idx, branch_out in enumerate(branch_outs):\n if isinstance(branch_out, indexed_slices.IndexedSlices):\n continue\n elif isinstance(branch_out, tensor_lib.Tensor):\n with branch_graphs[branch_idx].as_default():\n branch_outputs[branch_idx][output_idx] = math_ops._as_indexed_slices(branch_out)\n else:\n raise TypeError('Cannot reconcile {op_name} {output_idx}-th outputs:\\n outputs from all branches: {outputs}'.format(op_name='tf.cond' if op_type == _COND else 'tf.switch_case', output_idx=output_idx, outputs=branch_outs))\n for branch_graph, branch_outs in zip(branch_graphs, branch_outputs):\n branch_graph.structured_outputs = branch_outs\n branch_graph.outputs = [t for t in func_graph_module.flatten(branch_outs) if t is not None]", + "docstring": "Modifies each branch_graph's outputs to have the same output signature. Currently the only transformation implemented is turning a Tensor into an equivalent IndexedSlices if the other branch returns an IndexedSlices. Updates branch_graph.{outputs,structured_outputs} for each branch_graph in branch_graphs. 
Args: op_type: _COND or _CASE branch_graphs: of Raises: TypeError: if a set of outputs cannot be rewritten.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py", + "ast_data": "FunctionDef name:_make_output_composite_tensors_match arg:op_type arg:branch_graphs arguments arg arg Assign Assign Call Call Compare Call Call For Call Call If Compare Call Call Call If Call Call For Call If Call If Call With Call Assign Call Raise Call Call Compare For Call Assign Assign Call Compare" + }, + { + "library": "scipy", + "name": "findpole", + "source_code": "def findpole(cpen, cval, fval):\n num_vars = np.size(fval) - 1\n if DEBUGGING:\n assert cpen > 0\n assert np.size(cval) == num_vars + 1 and (not any(cval < 0 | np.isnan(cval) | np.isposinf(cval)))\n assert np.size(fval) == num_vars + 1 and (not any(np.isnan(fval) | np.isposinf(fval)))\n jopt = np.size(fval) - 1\n phi = fval + cpen * cval\n phimin = min(phi)\n if phimin < phi[jopt] or any((cval < cval[jopt]) & (phi <= phi[jopt])):\n jopt = np.ma.array(cval, mask=phi > phimin).argmin()\n if DEBUGGING:\n assert jopt >= 0 and jopt < num_vars + 1\n assert jopt == num_vars or phi[jopt] < phi[num_vars] or (phi[jopt] <= phi[num_vars] and cval[jopt] < cval[num_vars])\n return jopt", + "docstring": "This subroutine identifies the best vertex of the current simplex with respect to the merit function PHI = F + CPEN * CSTRV.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\cobyla\\update.py", + "ast_data": "FunctionDef name:findpole arg:cpen arg:cval arg:fval arguments arg arg arg Assign Call If Compare BoolOp Compare Call Call Compare Call Call BoolOp Compare Call Call Call Call Assign Call Assign Assign Call If BoolOp Compare Call Compare Compare Assign Call Call Compare If BoolOp Compare Compare BoolOp Compare Compare BoolOp Compare Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_source_inputs", + "source_code": "def get_source_inputs(tensor, layer=None, node_index=None):\n if not hasattr(tensor, '_keras_history'):\n return tensor\n if layer is None or node_index:\n layer, node_index, _ = tensor._keras_history\n if not layer._inbound_nodes:\n return [tensor]\n else:\n node = layer._inbound_nodes[node_index]\n if node.is_input:\n return nest.flatten(node.input_tensors)\n else:\n source_tensors = []\n for layer, node_index, _, tensor in node.iterate_inbound():\n previous_sources = get_source_inputs(tensor, layer, node_index)\n for x in previous_sources:\n if all((x is not t for t in source_tensors)):\n source_tensors.append(x)\n return source_tensors", + "docstring": "Returns the list of input tensors necessary to compute . Output will always be a list of tensors (potentially with 1 element). Args: tensor: The tensor to start from. layer: Origin layer of the tensor. Will be determined via tensor._keras_history if not provided. node_index: Origin node index of the tensor. 
Returns: List of input tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\layer_utils.py", + "ast_data": "FunctionDef name:get_source_inputs arg:tensor arg:layer arg:node_index arguments arg arg arg If Call Return return:yes If BoolOp Compare Assign If Return return:yes Assign If Return return:yes Call Assign For Call Assign Call For If Call Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "scatter_min", + "source_code": "def scatter_min(self, sparse_delta, use_locking=False, name=None):\n raise NotImplementedError", + "docstring": "Updates this variable with the min of and itself. Args: sparse_delta: to use as an argument of min with this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:scatter_min arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Raise" + }, + { + "library": "pandas", + "name": "book", + "source_code": "@property\ndef book(self):\n return self._book", + "docstring": "Book instance of class xlsxwriter.Workbook. This attribute can be used to access engine-specific features.", + "type": "method", + "file_path": "pandas\\pandas\\io\\excel\\_xlsxwriter.py", + "ast_data": "FunctionDef name:book arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "get_finder", + "source_code": "@functools.cache\ndef get_finder(import_path):\n Finder = import_string(import_path)\n if not issubclass(Finder, BaseFinder):\n raise ImproperlyConfigured('Finder \"%s\" is not a subclass of \"%s\"' % (Finder, BaseFinder))\n return Finder()", + "docstring": "Import the staticfiles finder class described by import_path, where import_path is the full Python path to the class.", + "type": "function", + "file_path": "django\\django\\contrib\\staticfiles\\finders.py", + "ast_data": "FunctionDef name:get_finder arg:import_path arguments arg Assign Call If Call Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_stop_trace", + "source_code": "def _stop_trace(self, batch=None):\n if batch is None:\n batch = self._stop_batch\n with self._train_writer.as_default():\n with summary_ops_v2.record_if(True):\n summary_ops_v2.trace_export(name='batch_%d' % batch, step=batch)\n self._stop_profiler()\n self._is_tracing = False", + "docstring": "Logs the trace graph to TensorBoard.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:_stop_trace arg:self arg:batch arguments arg arg If Compare Assign With Call With Call Call Call Assign" + }, + { + "library": "matplotlib", + "name": "_view_axes", + "source_code": "def _view_axes(E, R, V, roll):\n w = E - R\n w = w / np.linalg.norm(w)\n u = np.cross(V, w)\n u = u / np.linalg.norm(u)\n v = np.cross(w, u)\n if roll != 0:\n Rroll = _rotation_about_vector(w, -roll)\n u = np.dot(Rroll, u)\n v = np.dot(Rroll, v)\n return (u, v, w)", + "docstring": "Get the unit viewing axes in data coordinates. Parameters ---------- E : 3-element numpy array The coordinates of the eye/camera. R : 3-element numpy array The coordinates of the center of the view box. V : 3-element numpy array Unit vector in the direction of the vertical axis. roll : float The roll angle in radians. 
Returns ------- u : 3-element numpy array Unit vector pointing towards the right of the screen. v : 3-element numpy array Unit vector pointing towards the top of the screen. w : 3-element numpy array Unit vector pointing out of the screen.", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\proj3d.py", + "ast_data": "FunctionDef name:_view_axes arg:E arg:R arg:V arg:roll arguments arg arg arg arg Assign Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_inv_z", + "source_code": "def _inv_z(self, z):\n with ops.name_scope('reconstruct', values=[z]):\n return z * self.scale + self.loc", + "docstring": "Reconstruct input from a its normalized version.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\normal.py", + "ast_data": "FunctionDef name:_inv_z arg:self arg:z arguments arg arg With Call Return return:yes" + }, + { + "library": "pytorch", + "name": "forward", + "source_code": "@staticmethod\ndef forward(*args: Any, **kwargs: Any) -> Any:\n raise NotImplementedError('You must implement the forward function for custom autograd.Function.')", + "docstring": "Define the forward of the custom autograd Function. This function is to be overridden by all subclasses. There are two ways to define forward: Usage 1 (Combined forward and ctx):: @staticmethod def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any: pass - It must accept a context ctx as the first argument, followed by any number of arguments (tensors or other types). - See :ref: for more details Usage 2 (Separate forward and ctx):: @staticmethod def forward(*args: Any, **kwargs: Any) -> Any: pass @staticmethod def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None: pass - The forward no longer accepts a ctx argument. - Instead, you must also override the :meth: staticmethod to handle setting up the `extending-autogradctxctx.save_for_backwardctx.save_for_forward`.", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\function.py", + "ast_data": "FunctionDef name:forward arguments arg arg Raise Call" + }, + { + "library": "numpy", + "name": "fromroots", + "source_code": "@classmethod\ndef fromroots(cls, roots, domain=[], window=None, symbol='x'):\n [roots] = pu.as_series([roots], trim=False)\n if domain is None:\n domain = pu.getdomain(roots)\n elif isinstance(domain, list) and len(domain) == 0:\n domain = cls.domain\n if window is None:\n window = cls.window\n deg = len(roots)\n off, scl = pu.mapparms(domain, window)\n rnew = off + scl * roots\n coef = cls._fromroots(rnew) / scl ** deg\n return cls(coef, domain=domain, window=window, symbol=symbol)", + "docstring": "Return series instance that has the specified roots. Returns a series representing the product `` is a list of roots. Parameters ---------- roots : array_like List of roots. domain : {[], None, array_like}, optional Domain for the resulting series. If None the domain is the interval from the smallest root to the largest. If [] the domain is the class domain. The default is []. window : {None, array_like}, optional Window for the returned series. If None the class window is used. The default is None. symbol : str, optional Symbol representing the independent variable. Default is 'x'. 
Returns ------- new_series : series Series with the specified roots.", + "type": "method", + "file_path": "numpy\\numpy\\polynomial\\_polybase.py", + "ast_data": "FunctionDef name:fromroots arg:cls arg:roots arg:domain arg:window arg:symbol arguments arg arg arg arg arg Assign Call If Compare Assign Call If BoolOp Call Compare Call Assign If Compare Assign Assign Call Assign Call Assign Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_pil_png_to_float_array", + "source_code": "def _pil_png_to_float_array(pil_png):\n mode = pil_png.mode\n rawmode = pil_png.png.im_rawmode\n if rawmode == '1':\n return np.asarray(pil_png, np.float32)\n if rawmode == 'L;2':\n return np.divide(pil_png, 2 ** 2 - 1, dtype=np.float32)\n if rawmode == 'L;4':\n return np.divide(pil_png, 2 ** 4 - 1, dtype=np.float32)\n if rawmode == 'L':\n return np.divide(pil_png, 2 ** 8 - 1, dtype=np.float32)\n if rawmode == 'I;16B':\n return np.divide(pil_png, 2 ** 16 - 1, dtype=np.float32)\n if mode == 'RGB':\n return np.divide(pil_png, 2 ** 8 - 1, dtype=np.float32)\n if mode == 'P':\n return np.divide(pil_png.convert('RGBA'), 2 ** 8 - 1, dtype=np.float32)\n if mode == 'LA':\n return np.divide(pil_png.convert('RGBA'), 2 ** 8 - 1, dtype=np.float32)\n if mode == 'RGBA':\n return np.divide(pil_png, 2 ** 8 - 1, dtype=np.float32)\n raise ValueError(f'Unknown PIL rawmode: {rawmode}')", + "docstring": "Convert a PIL to a 0-1 float array.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:_pil_png_to_float_array arg:pil_png arguments arg Assign Assign If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Call If Compare Return return:yes Call Call If Compare Return return:yes Call Raise Call" + }, + { + "library": "numpy", + "name": "get_flags_linker_exe", + "source_code": "def get_flags_linker_exe(self):\n return self._get_command_flags('linker_exe')", + "docstring": "List of linker flags to build an executable.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py", + "ast_data": "FunctionDef name:get_flags_linker_exe arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "GroupNorm", + "source_code": "class GroupNorm(torch.nn.GroupNorm):\n __constants__ = ['num_groups', 'num_channels', 'eps', 'affine']\n\n def __init__(self, num_groups, num_channels, weight, bias, scale, zero_point, eps=1e-05, affine=True, device=None, dtype=None) -> None:\n factory_kwargs = {'device': device, 'dtype': dtype}\n super().__init__(num_groups, num_channels, eps, affine, **factory_kwargs)\n self.weight = weight\n self.bias = bias\n self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))\n self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))\n\n def forward(self, input):\n return torch.ops.quantized.group_norm(input, self.num_groups, self.weight, self.bias, self.eps, self.scale, self.zero_point)\n\n def _get_name(self):\n return 'QuantizedGroupNorm'\n\n @classmethod\n def from_float(cls, mod, use_precomputed_fake_quant=False):\n scale, zero_point = mod.activation_post_process.calculate_qparams()\n new_mod = cls(mod.num_groups, mod.num_channels, mod.weight, mod.bias, float(scale), int(zero_point), mod.eps, mod.affine)\n return new_mod", + "docstring": "This is the quantized 
version of :class:. Additional args: * **scale** - quantization scale of the output, type: double. * **zero_point** - quantization zero point of the output, type: long.", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\normalization.py", + "ast_data": "ClassDef name:GroupNorm Assign FunctionDef name:__init__ arg:self arg:num_groups arg:num_channels arg:weight arg:bias arg:scale arg:zero_point arg:eps arg:affine arg:device arg:dtype arguments arg arg arg arg arg arg arg arg arg arg arg Assign Call Call Assign Assign Call Call Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_remove_autograd_hooks", + "source_code": "def _remove_autograd_hooks(self):\n self.reducer._remove_autograd_hooks()", + "docstring": "Remove autograd hooks registered by the reducer on the model parameters.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py", + "ast_data": "FunctionDef name:_remove_autograd_hooks arg:self arguments arg Call" + }, + { + "library": "pytorch", + "name": "FlattenInputWithTreeSpecValidationInputStep", + "source_code": "class FlattenInputWithTreeSpecValidationInputStep(InputAdaptStep):\n _spec: pytree.TreeSpec | None = None\n\n def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n flattened_args, spec = pytree.tree_flatten((model_args, model_kwargs))\n if self._spec is None:\n self._spec = spec\n else:\n _assert_identical_pytree_spec(self._spec, spec, error_message='Model inputs incompatible with the format that was exported. ')\n return (flattened_args, {})", + "docstring": "Flatten nested collection types and return a flat list of elements. ONNX can't represent collection types (e.g., dictionary, tuple of tuple of tensor, etc). This class stores the output produced when was called the first time. It then validates the output produced from later calls.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", + "ast_data": "ClassDef name:FlattenInputWithTreeSpecValidationInputStep FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Assign Call If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n return self._name", + "docstring": "The name of this object. 
Used for checkpointing.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "TruncatedNormal", + "source_code": "class TruncatedNormal(Initializer):\n\n def __init__(self, mean=0.0, stddev=0.05, seed=None):\n self.mean = mean\n self.stddev = stddev\n self.seed = seed\n self._random_generator = _RandomGenerator(seed)\n\n def __call__(self, shape, dtype=None, **kwargs):\n _validate_kwargs(self.__class__.__name__, kwargs)\n dtype = _assert_float_dtype(_get_dtype(dtype))\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n return self._random_generator.truncated_normal(shape, self.mean, self.stddev, dtype)\n\n def get_config(self):\n return {'mean': self.mean, 'stddev': self.stddev, 'seed': self.seed}", + "docstring": "Initializer that generates a truncated normal distribution. Also available via the shortcut function . The values generated are similar to values from a initializer except that values more than two standard deviations from the mean are discarded and re-drawn. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.) >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate before truncation. seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py", + "ast_data": "ClassDef name:TruncatedNormal FunctionDef name:__init__ arg:self arg:mean arg:stddev arg:seed arguments arg arg arg arg Assign Assign Assign Assign Call FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call Call If Compare Assign Return return:yes Call FunctionDef name:get_config arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "__getitem__", + "source_code": "def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series:\n mask = self.groupby_object._make_mask_from_positional_indexer(arg)\n return self.groupby_object._mask_selected_obj(mask)", + "docstring": "Select by positional index per group. Implements GroupBy._positional_selector Parameters ---------- arg : PositionalIndexer | tuple Allowed values are: - int - int valued iterable such as list or range - slice with step either None or positive - tuple of integers and slices Returns ------- Series The filtered subset of the original groupby Series. DataFrame The filtered subset of the original groupby DataFrame. See Also -------- DataFrame.iloc : Integer-location based indexing for selection by position. GroupBy.head : Return first n rows of each group. GroupBy.tail : Return last n rows of each group. GroupBy._positional_selector : Return positional selection for each group. 
GroupBy.nth : Take the nth row from each group if n is an int, or a subset of rows, if n is a list of ints.", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\indexing.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:arg arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "ctc_decode", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):\n input_shape = shape(y_pred)\n num_samples, num_steps = (input_shape[0], input_shape[1])\n y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())\n input_length = math_ops.cast(input_length, dtypes_module.int32)\n if greedy:\n decoded, log_prob = ctc.ctc_greedy_decoder(inputs=y_pred, sequence_length=input_length)\n else:\n decoded, log_prob = ctc.ctc_beam_search_decoder(inputs=y_pred, sequence_length=input_length, beam_width=beam_width, top_paths=top_paths)\n decoded_dense = []\n for st in decoded:\n st = sparse_tensor.SparseTensor(st.indices, st.values, (num_samples, num_steps))\n decoded_dense.append(sparse_ops.sparse_tensor_to_dense(sp_input=st, default_value=-1))\n return (decoded_dense, log_prob)", + "docstring": "Decodes the output of a softmax. Can use either greedy search (also known as best path) or a constrained dictionary search. Args: y_pred: tensor containing the prediction, or output of the softmax. input_length: tensor containing the sequence length for each batch item in . greedy: perform much faster best-path search if . This does not use a dictionary. beam_width: if is : a beam search decoder will be used with a beam of this width. top_paths: if is , how many of the most probable paths will be returned. Returns: Tuple: List: if is , returns a list of one element that contains the decoded sequence. If , returns the most probable decoded sequences. Each decoded sequence has shape (samples, time_steps). Important: blank labels are returned as . 
Tensor that contains the log probability of each decoded sequence.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:ctc_decode arg:y_pred arg:input_length arg:greedy arg:beam_width arg:top_paths arguments arg arg arg arg arg Assign Call Assign Assign Call Call Call Assign Call If Assign Call Assign Call Assign For Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "__call__", + "source_code": "def __call__(self, graph_module: torch.fx.GraphModule, args) -> torch.fx.GraphModule:\n if self._options.use_aot_autograd:\n from functorch.compile import min_cut_rematerialization_partition\n from torch._dynamo.backends.common import aot_autograd\n return aot_autograd(fw_compiler=self.compile, partition_fn=min_cut_rematerialization_partition, decompositions=self._resolved_onnx_exporter_options.decomposition_table)(graph_module, args)\n return self.compile(graph_module, args)", + "docstring": "If `auto_autograd` method is invoked directly.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:graph_module arg:args arguments arg arg arg If Return return:yes Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "getdefiningclass", + "source_code": "def getdefiningclass(m, owner_class):\n method_name = m.__name__\n for super_class in inspect.getmro(owner_class):\n if hasattr(super_class, '__dict__') and method_name in super_class.__dict__ or (hasattr(super_class, '__slots__') and method_name in super_class.__slots__):\n return super_class\n return owner_class", + "docstring": "Resolves the class (e.g. one of the superclasses) that defined a method.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\inspect_utils.py", + "ast_data": "FunctionDef name:getdefiningclass arg:m arg:owner_class arguments arg arg Assign For Call If BoolOp BoolOp Call Compare BoolOp Call Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "check_steps_argument", + "source_code": "def check_steps_argument(input_data, steps, steps_name):\n is_x_iterator = isinstance(input_data, (iterator_ops.Iterator, iterator_ops.IteratorBase))\n if input_data is None or is_x_iterator or has_symbolic_tensors(input_data) or (isinstance(input_data, list) and (not input_data)):\n if steps is None:\n input_type_str = 'a Dataset iterator' if is_x_iterator else 'data tensors'\n raise ValueError('When using {input_type} as input to a model, you should specify the `{steps_name}` argument.'.format(input_type=input_type_str, steps_name=steps_name))\n return True\n if isinstance(input_data, (data_types.DatasetV1, data_types.DatasetV2)):\n return True\n if steps is not None:\n list_types = (np.ndarray, list, tuple)\n if isinstance(input_data, list_types) or (isinstance(input_data, dict) and any((isinstance(v, list_types) for v in input_data.values()))):\n logging.warning('When passing input data as arrays, do not specify `steps_per_epoch`/`steps` argument. Please use `batch_size` instead.')\n return False", + "docstring": "Validates argument based on input data's type. The cases when value must be provided are when 1. input data passed is an iterator. 2. model was built on top of symbolic tensors, input data is not required and is . 3. input data passed is a symbolic tensor. Args: input_data: Input data. 
Can be Numpy array(s) or TensorFlow tensor(s) or tf.data.Dataset iterator or . steps: Integer or . Total number of steps (batches of samples) to execute. steps_name: The public API's parameter name for . Returns: boolean, True if argument is required, else False. Raises: ValueError: if argument is required for given input data type but not provided.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "FunctionDef name:check_steps_argument arg:input_data arg:steps arg:steps_name arguments arg arg arg Assign Call If BoolOp Compare Call BoolOp Call If Compare Assign Raise Call Call Return return:yes If Call Return return:yes If Compare Assign If BoolOp Call BoolOp Call Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "register", + "source_code": "def register(self, name: str, opset: OpsetVersion, func: Callable, custom: bool=False) -> None:\n if '::' not in name:\n raise ValueError(f\"The name must be in the form of 'domain::op', not '{name}'\")\n symbolic_functions = self._registry.setdefault(name, _SymbolicFunctionGroup(name))\n if custom:\n symbolic_functions.add_custom(func, opset)\n else:\n symbolic_functions.add(func, opset)", + "docstring": "Registers a symbolic function. Args: name: The qualified name of the function to register. In the form of 'domain::op'. E.g. 'aten::add'. opset: The opset version of the function to register. func: The symbolic function to register. custom: Whether the function is a custom function that overrides existing ones. Raises: ValueError: If the separator '::' is not in the name.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py", + "ast_data": "FunctionDef name:register arg:self arg:name arg:opset arg:func arg:custom arguments arg arg arg arg arg If Compare Raise Call Assign Call Call If Call Call" + }, + { + "library": "django", + "name": "exists", + "source_code": "def exists(self, session_key):\n raise NotImplementedError('subclasses of SessionBase must provide an exists() method')", + "docstring": "Return True if the given session_key already exists.", + "type": "method", + "file_path": "django\\django\\contrib\\sessions\\backends\\base.py", + "ast_data": "FunctionDef name:exists arg:self arg:session_key arguments arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_enforce_names_consistency", + "source_code": "def _enforce_names_consistency(specs):\n\n def _has_name(spec):\n return hasattr(spec, 'name') and spec.name is not None\n\n def _clear_name(spec):\n spec = copy.deepcopy(spec)\n if hasattr(spec, 'name'):\n spec._name = None\n return spec\n flat_specs = nest.flatten(specs)\n name_inconsistency = any((_has_name(s) for s in flat_specs)) and (not all((_has_name(s) for s in flat_specs)))\n if name_inconsistency:\n specs = nest.map_structure(_clear_name, specs)\n return specs", + "docstring": "Enforces that either all specs have names or none do.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_keras_util.py", + "ast_data": "FunctionDef name:_enforce_names_consistency arg:specs arguments arg FunctionDef name:_has_name arg:spec arguments arg Return return:yes BoolOp Call Compare FunctionDef name:_clear_name arg:spec arguments arg Assign Call If Call Assign Return return:yes Assign Call Assign BoolOp Call Call Call Call If Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_givens_to_1", + "source_code": "def _givens_to_1(self, aii, ajj, 
aij):\n aiid = aii - 1.0\n ajjd = ajj - 1.0\n if ajjd == 0:\n return (0.0, 1.0)\n dd = math.sqrt(max(aij ** 2 - aiid * ajjd, 0))\n t = (aij + math.copysign(dd, aij)) / ajjd\n c = 1.0 / math.sqrt(1.0 + t * t)\n if c == 0:\n s = 1.0\n else:\n s = c * t\n return (c, s)", + "docstring": "Computes a 2x2 Givens matrix to put 1's on the diagonal. The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ]. The output matrix g is a 2x2 anti-symmetric matrix of the form [ c s ; -s c ]; the elements c and s are returned. Applying the output matrix to the input matrix (as b=g.T M g) results in a matrix with bii=1, provided tr(M) - det(M) >= 1 and floating point issues do not occur. Otherwise, some other valid rotation is returned. When tr(M)==2, also bjj=1.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:_givens_to_1 arg:self arg:aii arg:ajj arg:aij arguments arg arg arg arg Assign Assign If Compare Return return:yes Assign Call Call Assign Call Assign Call If Compare Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "_make_array", + "source_code": "def _make_array(inp):\n r0, *rest = inp\n if isinstance(r0, str):\n raise ValueError('List mosaic specification must be 2D')\n for j, r in enumerate(rest, start=1):\n if isinstance(r, str):\n raise ValueError('List mosaic specification must be 2D')\n if len(r0) != len(r):\n raise ValueError(f'All of the rows must be the same length, however the first row ({r0!r}) has length {len(r0)} and row {j} ({r!r}) has length {len(r)}.')\n out = np.zeros((len(inp), len(r0)), dtype=object)\n for j, r in enumerate(inp):\n for k, v in enumerate(r):\n out[j, k] = v\n return out", + "docstring": "Convert input into 2D array We need to have this internal function rather than `` so that a list of lists of lists does not get converted to an array of dimension > 2. Returns ------- 2D object array", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:_make_array arg:inp arguments arg Assign If Call Raise Call For Call If Call Raise Call If Compare Call Call Raise Call Call Call Assign Call Call Call For Call For Call Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self):\n cherrypy.Tool.__init__(self, 'on_end_request', self.record_stop)", + "docstring": "Initialize the statistics gathering tool.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg Call" + }, + { + "library": "django", + "name": "auth", + "source_code": "def auth(request):\n if hasattr(request, 'user'):\n user = request.user\n else:\n from django.contrib.auth.models import AnonymousUser\n user = AnonymousUser()\n return {'user': user, 'perms': PermWrapper(user)}", + "docstring": "Return context variables required by apps that use Django's authentication system. 
If there is no 'user' attribute in the request, use AnonymousUser (from django.contrib.auth).", + "type": "function", + "file_path": "django\\django\\contrib\\auth\\context_processors.py", + "ast_data": "FunctionDef name:auth arg:request arguments arg If Call Assign Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "normalize_dictlike_arg", + "source_code": "def normalize_dictlike_arg(self, how: str, obj: DataFrame | Series, func: AggFuncTypeDict) -> AggFuncTypeDict:\n assert how in ('apply', 'agg', 'transform')\n if how == 'agg' and isinstance(obj, ABCSeries) and any((is_list_like(v) for _, v in func.items())) or any((is_dict_like(v) for _, v in func.items())):\n raise SpecificationError('nested renamer is not supported')\n if obj.ndim != 1:\n from pandas import Index\n cols = Index(list(func.keys())).difference(obj.columns, sort=True)\n if len(cols) > 0:\n raise KeyError(f'Label(s) {list(cols)} do not exist')\n aggregator_types = (list, tuple, dict)\n if any((isinstance(x, aggregator_types) for _, x in func.items())):\n new_func: AggFuncTypeDict = {}\n for k, v in func.items():\n if not isinstance(v, aggregator_types):\n new_func[k] = [v]\n else:\n new_func[k] = v\n func = new_func\n return func", + "docstring": "Handler for dict-like argument. Ensures that necessary columns exist if obj is a DataFrame, and that a nested renamer is not passed. Also normalizes to all lists when values consists of a mix of list and non-lists.", + "type": "method", + "file_path": "pandas\\pandas\\core\\apply.py", + "ast_data": "FunctionDef name:normalize_dictlike_arg arg:self arg:how arg:obj arg:func arguments arg arg arg arg Compare If BoolOp BoolOp Compare Call Call Call Call Call Call Call Raise Call If Compare Assign Call Call Call Call If Compare Call Raise Call Call Assign If Call Call Call For Call If Call Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_sequence_length_feature_key_name", + "source_code": "def get_sequence_length_feature_key_name(self):\n return get_sequence_length_feature_key_name_from_feature_key_name(self.get_feature_key_name())", + "docstring": "Get the key for the associated sequence length feature.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py", + "ast_data": "FunctionDef name:get_sequence_length_feature_key_name arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "Rotate", + "source_code": "class Rotate(Module):\n\n def __init__(self, angle: Tensor, center: Union[None, Tensor]=None, mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=True) -> None:\n super().__init__()\n self.angle: Tensor = angle\n self.center: Union[None, Tensor] = center\n self.mode: str = mode\n self.padding_mode: str = padding_mode\n self.align_corners: bool = align_corners\n\n def forward(self, input: Tensor) -> Tensor:\n return rotate(input, self.angle, self.center, self.mode, self.padding_mode, self.align_corners)", + "docstring": "Rotate the tensor anti-clockwise about the centre. Args: angle: The angle through which to rotate. The tensor must have a shape of (B), where B is batch size. center: The center through which to rotate. The tensor must have a shape of (B, 2), where B is batch size and last dimension contains cx and cy. mode: interpolation mode to calculate output values ``. align_corners: interpolation flag. Returns: The rotated tensor with the same shape as the input. 
Example: >>> img = torch.rand(1, 3, 4, 4) >>> angle = torch.tensor([90.]) >>> out = Rotate(angle)(img) >>> print(out.shape) torch.Size([1, 3, 4, 4])", + "type": "class", + "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py", + "ast_data": "ClassDef name:Rotate FunctionDef name:__init__ arg:self arg:angle arg:center arg:mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_height", + "source_code": "def get_height(self):\n return self._height", + "docstring": "Return the height of the ellipse.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:get_height arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_generate_input_signature", + "source_code": "def _generate_input_signature(self, layer):\n if isinstance(layer.call, def_function.Function) and layer.call.input_signature is not None:\n return layer.call.input_signature\n elif isinstance(layer, training_lib.Model):\n return saving_utils.model_input_signature(layer)\n elif layer.input_spec is not None and layer._use_input_spec_as_call_signature:\n\n def to_tensor_spec_or_none(x):\n spec = input_spec.to_tensor_spec(x, layer._compute_dtype)\n if spec.shape == tensor_shape.TensorShape(None):\n return None\n return spec\n input_signature = [nest.map_structure(to_tensor_spec_or_none, layer.input_spec)]\n return input_signature\n else:\n return None", + "docstring": "Inspects layer object and returns the inferred input signature. Args: layer: Layer object. Returns: List of possibly nested TensorSpecs of the layer call function inputs. 
The list does not contain the argument.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py", + "ast_data": "FunctionDef name:_generate_input_signature arg:self arg:layer arguments arg arg If BoolOp Call Compare Return return:yes If Call Return return:yes Call If BoolOp Compare FunctionDef name:to_tensor_spec_or_none arg:x arguments arg Assign Call If Compare Call Return return:no Return return:yes Assign Call Return return:yes Return return:no" + }, + { + "library": "scipy", + "name": "shape", + "source_code": "@property\ndef shape(self):\n return self._shape", + "docstring": "Shape of the covariance array", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_covariance.py", + "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "get_transform_params", + "source_code": "def get_transform_params(self, split_node: torch.fx.Node, next_users: list[torch.fx.Node], user_inputs_list: list[list[Union[torch.fx.Node, _Range]]]) -> Optional[list[list[_TransformParam]]]:\n split_dim = _get_dim(split_node)\n transform_params_list: list[list[_TransformParam]] = []\n for user_node, user_inputs in zip(next_users, user_inputs_list):\n cat_dim = get_arg_value(user_node, 1, 'dim') or 0\n transform_params: list[_TransformParam] = []\n for user_input in user_inputs:\n if isinstance(user_input, tuple):\n movedim_params = (split_dim, cat_dim) if split_dim != cat_dim else None\n flatten_params = None\n if user_node.target == torch.cat:\n flatten_params = (cat_dim, cat_dim + 1)\n transform_params.append((None, movedim_params, None, flatten_params))\n elif user_node.target == torch.stack:\n transform_params.append((None, None, (cat_dim,), None))\n else:\n transform_params.append((None, None, None, None))\n transform_params_list.append(transform_params)\n return transform_params_list", + "docstring": "Figure out what transforms are needed for each input to each cat node. 
Here is the rough transforms we apply: x -> unbind -> stack => x -> movedim x -> unbind -> cat => x -> movedim -> flatten When cat/stack nodes have additional args: addn ---| addn -> unsqueeze ---| x -> unbind -> stack => x -> movedim -> cat addn ---| addn ---| x -> unbind -> cat => x -> movedim -> flatten -> cat (Note application of these depends on the dims as well)", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\split_cat.py", + "ast_data": "FunctionDef name:get_transform_params arg:self arg:split_node arg:next_users arg:user_inputs_list arguments arg arg arg arg Assign Call For Call Assign BoolOp Call For If Call Assign Compare Assign If Compare Assign Call If Compare Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_flash_attention_forward_flop", + "source_code": "@register_flop_formula(aten._flash_attention_forward, get_raw=True)\ndef _flash_attention_forward_flop(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, *args, out_shape=None, **kwargs) -> int:\n sizes = _unpack_flash_attention_nested_shapes(query=query, key=key, value=value, cum_seq_q=cum_seq_q, cum_seq_k=cum_seq_k, max_q=max_q, max_k=max_k)\n return sum((sdpa_flop_count(query_shape, key_shape, value_shape) for query_shape, key_shape, value_shape, _ in sizes))", + "docstring": "Count flops for self-attention.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\flop_counter.py", + "ast_data": "FunctionDef name:_flash_attention_forward_flop arg:query arg:key arg:value arg:cum_seq_q arg:cum_seq_k arg:max_q arg:max_k arguments arg arg arg arg arg arg arg arg arg arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_Edge", + "source_code": "class _Edge(collections.namedtuple('_Edge', ['source', 'destination'])):\n __slots__ = ()\n\n def __str__(self):\n return '{} -> {}'.format(self.source, self.destination)", + "docstring": "A directed graph edge.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", + "ast_data": "ClassDef name:_Edge Call Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "write_png_depth", + "source_code": "def write_png_depth(filename: str | os.PathLike[str], depth: int) -> None:\n data = struct.pack('!i', depth)\n with open(filename, 'r+b') as f:\n f.seek(-LEN_IEND, 2)\n f.write(DEPTH_CHUNK_LEN + DEPTH_CHUNK_START + data)\n crc = binascii.crc32(DEPTH_CHUNK_START + data) & 4294967295\n f.write(struct.pack('!I', crc))\n f.write(IEND_CHUNK)", + "docstring": "Write the special tEXt chunk indicating the depth to a PNG file. 
The chunk is placed immediately before the special IEND chunk.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\png.py", + "ast_data": "FunctionDef name:write_png_depth arg:filename arg:depth arguments arg arg Assign Call With Call Call Call Assign Call Call Call Call" + }, + { + "library": "django", + "name": "has_vary_header", + "source_code": "def has_vary_header(response, header_query):\n if not response.has_header('Vary'):\n return False\n vary_headers = cc_delim_re.split(response.headers['Vary'])\n existing_headers = {header.lower() for header in vary_headers}\n return header_query.lower() in existing_headers", + "docstring": "Check to see if the response has a given header name in its Vary header.", + "type": "function", + "file_path": "django\\django\\utils\\cache.py", + "ast_data": "FunctionDef name:has_vary_header arg:response arg:header_query arguments arg arg If Call Return return:yes Assign Call Assign Call Return return:yes Compare Call" + }, + { + "library": "pytorch", + "name": "elu", + "source_code": "def elu(input: Tensor, scale: float, zero_point: int, alpha: float=1.0) -> Tensor:\n if not input.is_quantized:\n raise ValueError(\"Input to 'quantized.elu' must be quantized!\")\n return torch.ops.quantized.elu(input, scale, zero_point, alpha)", + "docstring": "This is the quantized version of :func:. Args: input: quantized input scale: quantization scale of the output tensor zero_point: quantization zero point of the output tensor alpha: the alpha constant", + "type": "function", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py", + "ast_data": "FunctionDef name:elu arg:input arg:scale arg:zero_point arg:alpha arguments arg arg arg arg If Raise Call Return return:yes Call" + }, + { + "library": "django", + "name": "AlterModelManagers", + "source_code": "class AlterModelManagers(ModelOptionOperation):\n serialization_expand_args = ['managers']\n\n def __init__(self, name, managers):\n self.managers = managers\n super().__init__(name)\n\n def deconstruct(self):\n return (self.__class__.__qualname__, [self.name, self.managers], {})\n\n def state_forwards(self, app_label, state):\n state.alter_model_managers(app_label, self.name_lower, self.managers)\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n pass\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n pass\n\n def describe(self):\n return 'Change managers on %s' % self.name\n\n @property\n def migration_name_fragment(self):\n return 'alter_%s_managers' % self.name_lower", + "docstring": "Alter the model's managers.", + "type": "class", + "file_path": "django\\django\\db\\migrations\\operations\\models.py", + "ast_data": "ClassDef name:AlterModelManagers Assign FunctionDef name:__init__ arg:self arg:name arg:managers arguments arg arg arg Assign Call Call FunctionDef name:deconstruct arg:self arguments arg Return return:yes FunctionDef name:state_forwards arg:self arg:app_label arg:state arguments arg arg arg Call FunctionDef name:database_forwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg FunctionDef name:database_backwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg FunctionDef name:describe arg:self arguments arg Return return:yes FunctionDef name:migration_name_fragment arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "ExtensionIndex", + "source_code": "class ExtensionIndex(Index):\n 
_data: IntervalArray | NDArrayBackedExtensionArray\n\n def _validate_fill_value(self, value):\n return self._data._validate_setitem_value(value)\n\n @cache_readonly\n def _isnan(self) -> npt.NDArray[np.bool_]:\n return self._data.isna()", + "docstring": "Index subclass for indexes backed by ExtensionArray.", + "type": "class", + "file_path": "pandas\\pandas\\core\\indexes\\extension.py", + "ast_data": "ClassDef name:ExtensionIndex FunctionDef name:_validate_fill_value arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:_isnan arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_config_node_setter", + "source_code": "def _config_node_setter(self, setter):\n\n def setattr_wrapper(obj, name, value):\n if obj._lookup_dependency(name) is None:\n setter(obj, name, value)\n return setattr_wrapper", + "docstring": "Creates edges for nodes that are recreated from config.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py", + "ast_data": "FunctionDef name:_config_node_setter arg:self arg:setter arguments arg arg FunctionDef name:setattr_wrapper arg:obj arg:name arg:value arguments arg arg arg If Compare Call Call Return return:yes" + }, + { + "library": "cryptography", + "name": "load_public", + "source_code": "def load_public(self, data: memoryview) -> tuple[dsa.DSAPublicKey, memoryview]:\n (p, q, g, y), data = self.get_public(data)\n parameter_numbers = dsa.DSAParameterNumbers(p, q, g)\n public_numbers = dsa.DSAPublicNumbers(y, parameter_numbers)\n self._validate(public_numbers)\n public_key = public_numbers.public_key()\n return (public_key, data)", + "docstring": "Make DSA public key from data.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py", + "ast_data": "FunctionDef name:load_public arg:self arg:data arguments arg arg Assign Call Assign Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "authlib", + "name": "send_signal", + "source_code": "def send_signal(self, name, *args, **kwargs):\n raise NotImplementedError()", + "docstring": "Framework integration can re-implement this method to support signal system.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py", + "ast_data": "FunctionDef name:send_signal arg:self arg:name arguments arg arg arg arg Raise Call" + }, + { + "library": "django", + "name": "closed", + "source_code": "@property\ndef closed(self):\n return capi.geos_isclosed(self.ptr)", + "docstring": "Return whether or not this Geometry is closed.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:closed arg:self arguments arg Return return:yes Call" + }, + { + "library": "seaborn", + "name": "could_overlap", + "source_code": "def could_overlap(self, xyr_i, swarm):\n _, y_i, r_i = xyr_i\n neighbors = []\n for xyr_j in reversed(swarm):\n _, y_j, r_j = xyr_j\n if y_i - y_j < r_i + r_j:\n neighbors.append(xyr_j)\n else:\n break\n return np.array(neighbors)[::-1]", + "docstring": "Return a list of all swarm points that could overlap with target.", + "type": "method", + "file_path": "seaborn\\seaborn\\categorical.py", + "ast_data": "FunctionDef name:could_overlap arg:self arg:xyr_i arg:swarm arguments arg arg arg Assign Assign For Call Assign If Compare Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_check_conversion_params", + 
"source_code": "def _check_conversion_params(conversion_params, is_v2=False):\n supported_precision_modes = TrtPrecisionMode.supported_precision_modes()\n if conversion_params.precision_mode not in supported_precision_modes:\n raise ValueError(\"precision mode '{}' is not supported.It should be one of {}\".format(conversion_params.precision_mode, supported_precision_modes))\n if conversion_params.minimum_segment_size <= 0 and conversion_params.minimum_segment_size != -1:\n raise ValueError('minimum segment size should be positive or -1 (to disable main graph conversion).')", + "docstring": "Validate the provided TrtConversionParams. Args: conversion_params: a TrtConversionParams instance. is_v2: whether we're getting a RewriterConfig for TF 2.0. Raises: TypeError: if any of the parameters are of unexpected type. ValueError: if any of the parameters are of unexpected value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py", + "ast_data": "FunctionDef name:_check_conversion_params arg:conversion_params arg:is_v2 arguments arg arg Assign Call If Compare Raise Call Call If BoolOp Compare Compare Raise Call" + }, + { + "library": "django", + "name": "_update_unget_history", + "source_code": "def _update_unget_history(self, num_bytes):\n self._unget_history = [num_bytes] + self._unget_history[:49]\n number_equal = len([current_number for current_number in self._unget_history if current_number == num_bytes])\n if number_equal > 40:\n raise SuspiciousMultipartForm(\"The multipart parser got stuck, which shouldn't happen with normal uploaded files. Check for malicious upload activity; if there is none, report this to the Django developers.\")", + "docstring": "Update the unget history as a sanity check to see if we've pushed back the same number of bytes in one chunk. If we keep ungetting the same number of bytes many times (here, 50), we're mostly likely in an infinite loop of some sort. This is usually caused by a maliciously-malformed MIME request.", + "type": "method", + "file_path": "django\\django\\http\\multipartparser.py", + "ast_data": "FunctionDef name:_update_unget_history arg:self arg:num_bytes arguments arg arg Assign Assign Call Compare If Compare Raise Call" + }, + { + "library": "django", + "name": "DeferredAttribute", + "source_code": "class DeferredAttribute:\n\n def __init__(self, field):\n self.field = field\n\n def __get__(self, instance, cls=None):\n if instance is None:\n return self\n data = instance.__dict__\n field_name = self.field.attname\n if field_name not in data:\n val = self._check_parent_chain(instance)\n if val is None:\n if not instance._is_pk_set():\n raise AttributeError(f'Cannot retrieve deferred field {field_name!r} from an unsaved model.')\n instance.refresh_from_db(fields=[field_name])\n else:\n data[field_name] = val\n return data[field_name]\n\n def _check_parent_chain(self, instance):\n opts = instance._meta\n link_field = opts.get_ancestor_link(self.field.model)\n if self.field.primary_key and self.field != link_field:\n return getattr(instance, link_field.attname)\n return None", + "docstring": "A wrapper for a deferred-loading field. 
When the value is read from this object the first time, the query is executed.", + "type": "class", + "file_path": "django\\django\\db\\models\\query_utils.py", + "ast_data": "ClassDef name:DeferredAttribute FunctionDef name:__init__ arg:self arg:field arguments arg arg Assign FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg If Compare Return return:yes Assign Assign If Compare Assign Call If Compare If Call Raise Call Call Assign Return return:yes FunctionDef name:_check_parent_chain arg:self arg:instance arguments arg arg Assign Assign Call If BoolOp Compare Return return:yes Call Return return:no" + }, + { + "library": "tensorflow", + "name": "_NoopSummaryWriter", + "source_code": "class _NoopSummaryWriter(SummaryWriter):\n\n def set_as_default(self, step=None):\n pass\n\n @tf_contextlib.contextmanager\n def as_default(self, step=None):\n yield\n\n def init(self):\n pass\n\n def flush(self):\n pass\n\n def close(self):\n pass", + "docstring": "A summary writer that does nothing, for create_noop_writer().", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "ClassDef name:_NoopSummaryWriter FunctionDef name:set_as_default arg:self arg:step arguments arg arg FunctionDef name:as_default arg:self arg:step arguments arg arg FunctionDef name:init arg:self arguments arg FunctionDef name:flush arg:self arguments arg FunctionDef name:close arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "register_thread", + "source_code": "def register_thread(self, thread):\n with self._lock:\n self._registered_threads.add(thread)", + "docstring": "Register a thread to join. Args: thread: A Python thread to join.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py", + "ast_data": "FunctionDef name:register_thread arg:self arg:thread arguments arg arg With Call" + }, + { + "library": "scikit-learn", + "name": "inverse_transform", + "source_code": "def inverse_transform(self, X):\n check_is_fitted(self)\n X = check_array(X)\n return X @ self.components_ + self.mean_", + "docstring": "Transform data from the latent space to the original space. This inversion is an approximation due to the loss of information induced by the forward decomposition. .. versionadded:: 1.2 Parameters ---------- X : ndarray of shape (n_samples, n_components) Data in the latent space. Returns ------- X_original : ndarray of shape (n_samples, n_features) Reconstructed data in the original space.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_sparse_pca.py", + "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "learning_phase", + "source_code": "@doc_controls.do_not_generate_docs\ndef learning_phase():\n graph = ops.get_default_graph()\n if graph is getattr(_GRAPH, 'graph', None):\n learning_phase = symbolic_learning_phase()\n else:\n with ops.init_scope():\n learning_phase = _GRAPH_LEARNING_PHASES[None]\n _mark_func_graph_as_unsaveable(graph, learning_phase)\n return learning_phase", + "docstring": "Returns the learning phase flag. The learning phase flag is a bool tensor (0 = test, 1 = train) to be passed as input to any Keras function that uses a different behavior at train time and test time. 
Returns: Learning phase (scalar integer tensor or Python integer).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:learning_phase arguments Assign Call If Compare Call Assign Call With Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_adjust_scalar_from_onnx_to_fx", + "source_code": "def _adjust_scalar_from_onnx_to_fx(tensor: torch.Tensor, prim_value: Union[torch.Tensor, torch.SymInt, int, torch.SymFloat, float, torch.SymBool, bool]) -> Union[torch.Tensor, int, float, bool]:\n assert isinstance(tensor, torch.Tensor), \"ORT's output must be tensor.\"\n if isinstance(prim_value, (torch.SymInt, int, torch.SymFloat, float, torch.SymBool, bool)):\n return tensor.item()\n return tensor", + "docstring": "Helper function to wrap ORT-produced torch.Tensor as PyTorch variables", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py", + "ast_data": "FunctionDef name:_adjust_scalar_from_onnx_to_fx arg:tensor arg:prim_value arguments arg arg Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "broadcast_types", + "source_code": "def broadcast_types(t1, t2):\n if t1 == Dyn or t2 == Dyn or isinstance(t1, Var) or isinstance(t2, Var):\n return (t1, t2)\n if isinstance(t1, TensorType) and isinstance(t2, TensorType):\n s1 = len(t1.__args__)\n s2 = len(t2.__args__)\n new_t1 = list(t1.__args__)\n new_t2 = list(t2.__args__)\n if s1 > s2:\n for i in range(s1 - s2):\n new_t2.insert(0, 1)\n elif s2 > s1:\n for i in range(s2 - s1):\n new_t1.insert(0, 1)\n for i, (x, y) in enumerate(zip(new_t1, new_t2)):\n if x == 1:\n new_t1[i] = y\n elif y == 1:\n new_t2[i] = x\n t1, t2 = (TensorType(tuple(new_t1)), TensorType(tuple(new_t2)))\n return (t1, t2)\n else:\n raise TypeError(f'Cannot broadcast types {t1} and {t2}')", + "docstring": "Applies broadcasting to both given types such that they become consistent with eachother and returns two new resulting types", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py", + "ast_data": "FunctionDef name:broadcast_types arg:t1 arg:t2 arguments arg arg If BoolOp Compare Compare Call Call Return return:yes If BoolOp Call Call Assign Call Assign Call Assign Call Assign Call If Compare For Call Call If Compare For Call Call For Call Call If Compare Assign If Compare Assign Assign Call Call Call Call Return return:yes Raise Call" + }, + { + "library": "django", + "name": "AdminPasswordChangeForm", + "source_code": "class AdminPasswordChangeForm(SetUnusablePasswordMixin, SetPasswordMixin, forms.Form):\n required_css_class = 'required'\n usable_password_help_text = SetUnusablePasswordMixin.usable_password_help_text + '
<ul><li>If disabled, the current password for this user will be lost.</li></ul>
'\n password1, password2 = SetPasswordMixin.create_password_fields()\n\n def __init__(self, user, *args, **kwargs):\n self.user = user\n super().__init__(*args, **kwargs)\n self.fields['password1'].widget.attrs['autofocus'] = True\n if self.user.has_usable_password():\n self.fields['password1'].required = False\n self.fields['password2'].required = False\n self.fields['usable_password'] = SetUnusablePasswordMixin.create_usable_password_field(self.usable_password_help_text)\n\n def clean(self):\n self.validate_passwords()\n self.validate_password_for_user(self.user)\n return super().clean()\n\n def save(self, commit=True):\n return self.set_password_and_save(self.user, commit=commit)\n\n @property\n def changed_data(self):\n data = super().changed_data\n if 'set_usable_password' in data or ('password1' in data and 'password2' in data):\n return ['password']\n return []", + "docstring": "A form used to change the password of a user in the admin interface.", + "type": "class", + "file_path": "django\\django\\contrib\\auth\\forms.py", + "ast_data": "ClassDef name:AdminPasswordChangeForm Assign Assign Assign Call FunctionDef name:__init__ arg:self arg:user arguments arg arg arg arg Assign Call Call Assign If Call Assign Assign Assign Call FunctionDef name:clean arg:self arguments arg Call Call Return return:yes Call Call FunctionDef name:save arg:self arg:commit arguments arg arg Return return:yes Call FunctionDef name:changed_data arg:self arguments arg Assign Call If BoolOp Compare BoolOp Compare Compare Return return:yes Return return:no" + }, + { + "library": "numpy", + "name": "_check_for_import_lib", + "source_code": "def _check_for_import_lib():\n major_version, minor_version = tuple(sys.version_info[:2])\n patterns = ['libpython%d%d.a', 'libpython%d%d.dll.a', 'libpython%d.%d.dll.a']\n stems = [sys.prefix]\n if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:\n stems.append(sys.base_prefix)\n elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:\n stems.append(sys.real_prefix)\n sub_dirs = ['libs', 'lib']\n candidates = []\n for pat in patterns:\n filename = pat % (major_version, minor_version)\n for stem_dir in stems:\n for folder in sub_dirs:\n candidates.append(os.path.join(stem_dir, folder, filename))\n for fullname in candidates:\n if os.path.isfile(fullname):\n return (True, fullname)\n return (False, candidates[0])", + "docstring": "Check if an import library for the Python runtime already exists.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\mingw32ccompiler.py", + "ast_data": "FunctionDef name:_check_for_import_lib arguments Assign Call Assign Assign If BoolOp Call Compare Call If BoolOp Call Compare Call Assign Assign For Assign For For Call Call For If Call Return return:yes Return return:yes" + }, + { + "library": "scrapy", + "name": "_configure", + "source_code": "def _configure(self, options: dict[str, Any], dont_fail: bool=False) -> None:\n self.encoding: str | None = options.pop('encoding', None)\n self.fields_to_export: Mapping[str, str] | Iterable[str] | None = options.pop('fields_to_export', None)\n self.export_empty_fields: bool = options.pop('export_empty_fields', False)\n self.indent: int | None = options.pop('indent', None)\n if not dont_fail and options:\n raise TypeError(f'Unexpected options: {', '.join(options.keys())}')", + "docstring": "Configure the exporter by popping options from the `` methods)", + "type": "method", + "file_path": "scrapy\\scrapy\\exporters.py", + "ast_data": "FunctionDef name:_configure 
arg:self arg:options arg:dont_fail arguments arg arg arg Call Call Call Call If BoolOp Raise Call Call Call" + }, + { + "library": "kornia", + "name": "YcbcrToRgb", + "source_code": "class YcbcrToRgb(Module):\n ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n def forward(self, image: Tensor) -> Tensor:\n return ycbcr_to_rgb(image)", + "docstring": "Convert an image from YCbCr to Rgb. The image data is assumed to be in the range of (0, 1). Returns: RGB version of the image. Shape: - image: :math: - output: :math: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> rgb = YcbcrToRgb() >>> output = rgb(input) # 2x3x4x5", + "type": "class", + "file_path": "kornia\\kornia\\color\\ycbcr.py", + "ast_data": "ClassDef name:YcbcrToRgb FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_remove_empty_lines", + "source_code": "def _remove_empty_lines(self, lines: list[list[T]]) -> list[list[T]]:\n return [line for line in lines if any((not isinstance(e, str) or e.strip() for e in line))]", + "docstring": "Returns the list of lines without the empty ones. With fixed-width fields, empty lines become arrays of empty strings. See PythonParser._remove_empty_lines.", + "type": "method", + "file_path": "pandas\\pandas\\io\\parsers\\python_parser.py", + "ast_data": "FunctionDef name:_remove_empty_lines arg:self arg:lines arguments arg arg Return return:yes Call BoolOp Call Call" + }, + { + "library": "pytorch", + "name": "replace_indirect", + "source_code": "def replace_indirect(self, old, new):\n if str(old) == str(new):\n return\n assert self.indexing is not None\n self.indexing = {k: sympy_subs(v, {old: new}) for k, v in self.indexing.items()}", + "docstring": "Swap in a variable used in indirect indexing", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\loop_body.py", + "ast_data": "FunctionDef name:replace_indirect arg:self arg:old arg:new arguments arg arg arg If Compare Call Call Return return:no Compare Assign Call Call" + }, + { + "library": "pytorch", + "name": "_stop_workers", + "source_code": "@abc.abstractmethod\ndef _stop_workers(self, worker_group: WorkerGroup) -> None:\n raise NotImplementedError", + "docstring": "Stop all workers in the given worker group. Implementors must deal with workers in all states defined by ``. 
That is, it must gracefully handle stopping non-existent workers, unhealthy (stuck) workers, etc.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py", + "ast_data": "FunctionDef name:_stop_workers arg:self arg:worker_group arguments arg arg Raise" + }, + { + "library": "pytorch", + "name": "gen_lists_of_dims", + "source_code": "def gen_lists_of_dims(num_tensors: int, dim_size: int, counter: int):\n res = []\n for _ in range(num_tensors):\n dims, counter = gen_tensor_dims(dim_size, counter)\n res.append(dims)\n return (res, counter)", + "docstring": "Generate lists of DVar to represent tensor dimensions Args: num_tensors: the required number of tensors dim_size: the number of dimensions for each tensor counter: variable tracking Returns: A list of a list of tensor dimensions", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", + "ast_data": "FunctionDef name:gen_lists_of_dims arg:num_tensors arg:dim_size arg:counter arguments arg arg arg Assign For Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "double_output", + "source_code": "def double_output(func, argtypes, errcheck=False, strarg=False, cpl=False):\n func.argtypes = argtypes\n func.restype = c_double\n if errcheck:\n func.errcheck = partial(check_arg_errcode, cpl=cpl)\n if strarg:\n func.errcheck = check_str_arg\n return func", + "docstring": "Generate a ctypes function that returns a double value.", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py", + "ast_data": "FunctionDef name:double_output arg:func arg:argtypes arg:errcheck arg:strarg arg:cpl arguments arg arg arg arg arg Assign Assign If Assign Call If Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "one_host_numpy_dataset", + "source_code": "def one_host_numpy_dataset(numpy_input, colocate_with, session):\n\n def create_colocated_variable(next_creator, **kwargs):\n kwargs['colocate_with'] = colocate_with\n return next_creator(**kwargs)\n numpy_flat = nest.flatten(numpy_input)\n with variable_scope.variable_creator_scope(create_colocated_variable):\n vars_flat = tuple((variable_v1.VariableV1(array_ops.zeros(i.shape, i.dtype), trainable=False) for i in numpy_flat))\n for v, i in zip(vars_flat, numpy_flat):\n init_var_from_numpy(v, i, session)\n vars_nested = nest.pack_sequence_as(numpy_input, vars_flat)\n return dataset_ops.Dataset.from_tensor_slices(vars_nested)", + "docstring": "Create a dataset on from .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\numpy_dataset.py", + "ast_data": "FunctionDef name:one_host_numpy_dataset arg:numpy_input arg:colocate_with arg:session arguments arg arg arg FunctionDef name:create_colocated_variable arg:next_creator arguments arg arg Assign Return return:yes Call Assign Call With Call Assign Call Call Call For Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "with_rank_at_most", + "source_code": "def with_rank_at_most(self, rank):\n if self.rank is not None and self.rank > rank:\n raise ValueError('Shape %s must have rank at most %d' % (self, rank))\n else:\n return self", + "docstring": "Returns a shape based on with at most the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as with at most the given rank. 
Raises: ValueError: If does not represent a shape with at most the given .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", + "ast_data": "FunctionDef name:with_rank_at_most arg:self arg:rank arguments arg arg If BoolOp Compare Compare Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "poisson", + "source_code": "@dispatch.add_dispatch_support\ndef poisson(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n return backend.mean(y_pred - y_true * math_ops.log(y_pred + backend.epsilon()), axis=-1)", + "docstring": "Computes the Poisson loss between y_true and y_pred. The Poisson loss is the mean of the elements of the . Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.poisson(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_pred = y_pred + 1e-7 >>> assert np.allclose( ... loss.numpy(), np.mean(y_pred - y_true * np.log(y_pred), axis=-1), ... atol=1e-5) Args: y_true: Ground truth values. shape = . y_pred: The predicted values. shape = . Returns: Poisson loss value. shape = . Raises: InvalidArgumentError: If and have incompatible shapes.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:poisson arg:y_true arg:y_pred arguments arg arg Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "sym_or", + "source_code": "def sym_or(x: BoolLikeType, *others: BoolLikeType) -> BoolLikeType:\n assert isinstance(x, (bool, SymBool))\n if len(others) == 0:\n return x\n for y in others:\n assert isinstance(y, (bool, SymBool))\n x = operator.or_(x, y)\n return x", + "docstring": "or, but for symbolic expressions, without bool casting.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:sym_or arg:x arguments arg arg Call If Compare Call Return return:yes For Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "wrapped_flat_buffer_file_to_mlir", + "source_code": "def wrapped_flat_buffer_file_to_mlir(model, input_is_filepath):\n return _pywrap_converter_api.FlatBufferToMlir(model, input_is_filepath)", + "docstring": "Wraps FlatBufferFileToMlir with lazy loader.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\lite\\python\\wrap_converter.py", + "ast_data": "FunctionDef name:wrapped_flat_buffer_file_to_mlir arg:model arg:input_is_filepath arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "index_to_string_table_from_tensor", + "source_code": "def index_to_string_table_from_tensor(vocabulary_list, default_value='UNK', name=None):\n if vocabulary_list is None:\n raise ValueError('`vocabulary_list` argument must be specified.')\n with ops.name_scope(name, 'index_to_string'):\n vocabulary_list = ops.convert_to_tensor(vocabulary_list, dtypes.string)\n num_elements = array_ops.size(vocabulary_list)\n keys = math_ops.cast(math_ops.range(num_elements), dtypes.int64)\n init = KeyValueTensorInitializer(keys, vocabulary_list, dtypes.int64, dtypes.string, name='table_init')\n return StaticHashTableV1(init, default_value)", + "docstring": "Returns a lookup table that maps a of indices into strings. This operation constructs a lookup table to map int64 indices into string values. 
The mapping is initialized from a string 1-D where each element is a value and the corresponding index within the tensor is the key. Any input which does not have a corresponding index in 'vocabulary_list' (an out-of-vocabulary entry) is assigned the The underlying table must be initialized by calling or once. Elements in cannot have duplicates, otherwise when executing the table initializer op, it will throw a . Sample Usages: Args: vocabulary_list: A 1-D string that specifies the strings to map from indices. default_value: The value to use for out-of-vocabulary indices. name: A name for this op (optional). Returns: The lookup table to map a string values associated to a given index . Raises: ValueError: when is not set.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:index_to_string_table_from_tensor arg:vocabulary_list arg:default_value arg:name arguments arg arg arg If Compare Raise Call With Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "msvc_runtime_version", + "source_code": "def msvc_runtime_version():\n msc_pos = sys.version.find('MSC v.')\n if msc_pos != -1:\n msc_ver = int(sys.version[msc_pos + 6:msc_pos + 10])\n else:\n msc_ver = None\n return msc_ver", + "docstring": "Return version of MSVC runtime library, as defined by __MSC_VER__ macro", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\misc_util.py", + "ast_data": "FunctionDef name:msvc_runtime_version arguments Assign Call If Compare Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "AtomicFunction", + "source_code": "@tf_export('types.experimental.AtomicFunction', v1=[])\nclass AtomicFunction(Callable):\n\n def call_with_captures(self, args, kwargs, captures):\n pass", + "docstring": "Base class for graph functions. An encapsulates a single graph function definition. can be called directly only if no captures are needed according to the . If captures are present, please use instead. does not support gradients. Please use the parent if you need gradient support.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\types\\core.py", + "ast_data": "ClassDef name:AtomicFunction FunctionDef name:call_with_captures arg:self arg:args arg:kwargs arg:captures arguments arg arg arg arg Call" + }, + { + "library": "django", + "name": "_encode_parts", + "source_code": "def _encode_parts(self, messages, encode_empty=False):\n if messages or encode_empty:\n return self.signer.sign_object(messages, serializer=MessagePartGatherSerializer, compress=True)", + "docstring": "Return an encoded version of the serialized messages list which can be stored as plain text. 
Since the data will be retrieved from the client-side, the encoded data also contains a hash to ensure that the data was not tampered with.", + "type": "method", + "file_path": "django\\django\\contrib\\messages\\storage\\cookie.py", + "ast_data": "FunctionDef name:_encode_parts arg:self arg:messages arg:encode_empty arguments arg arg arg If BoolOp Return return:yes Call" + }, + { + "library": "django", + "name": "widget_attrs", + "source_code": "def widget_attrs(self, widget):\n return {}", + "docstring": "Given a Widget instance (*not* a Widget class), return a dictionary of any HTML attributes that should be added to the Widget, based on this Field.", + "type": "method", + "file_path": "django\\django\\forms\\fields.py", + "ast_data": "FunctionDef name:widget_attrs arg:self arg:widget arguments arg arg Return return:no" + }, + { + "library": "tensorflow", + "name": "num_clients", + "source_code": "@tf_export('experimental.dtensor.num_clients', v1=[])\ndef num_clients() -> int:\n if is_local_mode():\n return 1\n return len(jobs())", + "docstring": "Returns the number of clients in this DTensor cluster.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py", + "ast_data": "FunctionDef name:num_clients arguments If Call Return return:yes Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "_ravel", + "source_code": "def _ravel(array, xp=None):\n xp, _ = get_namespace(array, xp=xp)\n if _is_numpy_namespace(xp):\n array = numpy.asarray(array)\n return xp.asarray(numpy.ravel(array, order='C'))\n return xp.reshape(array, shape=(-1,))", + "docstring": "Array API compliant version of np.ravel. For non numpy namespaces, it just returns a flattened array, that might be or not be a copy.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py", + "ast_data": "FunctionDef name:_ravel arg:array arg:xp arguments arg arg Assign Call If Call Assign Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "correlate1d", + "source_code": "@_ni_docstrings.docfiller\ndef correlate1d(input, weights, axis=-1, output=None, mode='reflect', cval=0.0, origin=0):\n input = np.asarray(input)\n weights = np.asarray(weights)\n complex_input = input.dtype.kind == 'c'\n complex_weights = weights.dtype.kind == 'c'\n if complex_input or complex_weights:\n if complex_weights:\n weights = weights.conj()\n weights = weights.astype(np.complex128, copy=False)\n kwargs = dict(axis=axis, mode=mode, origin=origin)\n output = _ni_support._get_output(output, input, complex_output=True)\n return _complex_via_real_components(correlate1d, input, weights, output, cval, **kwargs)\n output = _ni_support._get_output(output, input)\n weights = np.asarray(weights, dtype=np.float64)\n if weights.ndim != 1 or weights.shape[0] < 1:\n raise RuntimeError('no filter weights given')\n if not weights.flags.contiguous:\n weights = weights.copy()\n axis = normalize_axis_index(axis, input.ndim)\n if _invalid_origin(origin, len(weights)):\n raise ValueError('Invalid origin; origin must satisfy -(len(weights) // 2) <= origin <= (len(weights)-1) // 2')\n mode = _ni_support._extend_mode_to_code(mode)\n _nd_image.correlate1d(input, weights, axis, output, mode, cval, origin)\n return output", + "docstring": "Calculate a 1-D correlation along the given axis. The lines of the array along the given axis are correlated with the given weights. Parameters ---------- %(input)s weights : array 1-D sequence of numbers. 
%(axis)s %(output)s %(mode_reflect)s %(cval)s %(origin)s Returns ------- result : ndarray Correlation result. Has the same shape as . Examples -------- >>> from scipy.ndimage import correlate1d >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) array([ 8, 26, 8, 12, 7, 28, 36, 9])", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_filters.py", + "ast_data": "FunctionDef name:correlate1d arg:input arg:weights arg:axis arg:output arg:mode arg:cval arg:origin arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Compare Assign Compare If BoolOp If Assign Call Assign Call Assign Call Assign Call Return return:yes Call Assign Call Assign Call If BoolOp Compare Compare Raise Call If Assign Call Assign Call If Call Call Raise Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "save", + "source_code": "def save(self, must_create=False):\n self._session_key = self._get_session_key()\n self.modified = True", + "docstring": "To save, get the session key as a securely signed string and then set the modified flag so that the cookie is set on the client for the current request.", + "type": "method", + "file_path": "django\\django\\contrib\\sessions\\backends\\signed_cookies.py", + "ast_data": "FunctionDef name:save arg:self arg:must_create arguments arg arg Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, dump_root, partition_graphs=None, validate=True):\n if not gfile.IsDirectory(dump_root):\n raise IOError('Dump root directory %s does not exist' % dump_root)\n self._core_metadata = []\n self._dump_root = dump_root\n self._load_core_metadata()\n self._load_fetches_info()\n self._load_feeds_info()\n self._load_all_device_dumps(partition_graphs, validate)\n self._python_graph = None", + "docstring": "constructor. Args: dump_root: () path to the dump root directory. partition_graphs: A repeated field of GraphDefs representing the partition graphs executed by the TensorFlow runtime. validate: () whether the dump files are to be validated against the partition graphs. Raises: IOError: If dump_root does not exist as a directory. ValueError: If more than one core metadata file is found under the dump root directory.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:dump_root arg:partition_graphs arg:validate arguments arg arg arg arg If Call Raise Call Assign Assign Call Call Call Call Assign" + }, + { + "library": "pandas", + "name": "navbar_add_info", + "source_code": "@staticmethod\ndef navbar_add_info(context):\n for i, item in enumerate(context['navbar']):\n context['navbar'][i] = dict(item, has_subitems=isinstance(item['target'], list), slug=item['name'].replace(' ', '-').lower())\n return context", + "docstring": "Items in the main navigation bar can be direct links, or dropdowns with subitems. 
This context preprocessor adds a boolean field `` field to be used as a CSS id.", + "type": "method", + "file_path": "pandas\\web\\pandas_web.py", + "ast_data": "FunctionDef name:navbar_add_info arg:context arguments arg For Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "backtick_repl", + "source_code": "def backtick_repl(matchobj):\n if matchobj.group(2) != ' ':\n post = '\\\\ ' + matchobj.group(2)\n else:\n post = matchobj.group(2)\n return '``' + matchobj.group(1) + '``' + post", + "docstring": "repl to add an escaped space following a code block if needed", + "type": "function", + "file_path": "numpy\\tools\\changelog.py", + "ast_data": "FunctionDef name:backtick_repl arg:matchobj arguments arg If Compare Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_global_config_filter", + "source_code": "def _global_config_filter(nodes: list[Node]) -> bool:\n num_nodes_in_default_quantizable_ops = sum((node.target in default_quantizable_ops for node in nodes))\n if num_nodes_in_default_quantizable_ops > 1:\n raise NotImplementedError('Several nodes within a single pattern are default quantizable operations.')\n return num_nodes_in_default_quantizable_ops == 1", + "docstring": "Filter function for global configuration. This filter function takes a list of nodes and returns True if there is exactly one node in the list that is a default quantizable operation, False otherwise.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py", + "ast_data": "FunctionDef name:_global_config_filter arg:nodes arguments arg Assign Call Compare If Compare Raise Call Return return:yes Compare" + }, + { + "library": "sphinx", + "name": "visit_Try", + "source_code": "def visit_Try(self, node: ast.Try) -> None:\n for subnode in node.body:\n self.visit(subnode)\n for subnode in node.orelse:\n self.visit(subnode)", + "docstring": "Handles Try node and processes body and else-clause. .. note:: pycode parser ignores objects definition in except-clause.", + "type": "method", + "file_path": "sphinx\\sphinx\\pycode\\parser.py", + "ast_data": "FunctionDef name:visit_Try arg:self arg:node arguments arg arg For Call For Call" + }, + { + "library": "scikit-learn", + "name": "check_classifier_not_supporting_multiclass", + "source_code": "def check_classifier_not_supporting_multiclass(name, estimator_orig):\n estimator = clone(estimator_orig)\n set_random_state(estimator)\n X, y = make_classification(n_samples=100, n_classes=3, n_informative=3, n_clusters_per_class=1, random_state=0)\n err_msg = \" The estimator tag `tags.classifier_tags.multi_class` is False for {name}\\n which means it does not support multiclass classification. However, it does\\n not raise the right `ValueError` when calling fit with a multiclass dataset,\\n including the error message 'Only binary classification is supported.' This\\n can be achieved by the following pattern:\\n\\n y_type = type_of_target(y, input_name='y', raise_unknown=True)\\n if y_type != 'binary':\\n raise ValueError(\\n 'Only binary classification is supported. 
The type of the target '\\n f'is {{y_type}}.'\\n )\\n \".format(name=name)\n err_msg = textwrap.dedent(err_msg)\n with raises(ValueError, match='Only binary classification is supported.', err_msg=err_msg):\n estimator.fit(X, y)", + "docstring": "Check that if the classifier has tags.classifier_tags.multi_class=False, then it should raise a ValueError when calling fit with a multiclass dataset. This test is not yielded if the tag is not False.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py", + "ast_data": "FunctionDef name:check_classifier_not_supporting_multiclass arg:name arg:estimator_orig arguments arg arg Assign Call Call Assign Call Assign Call Assign Call With Call Call" + }, + { + "library": "tensorflow", + "name": "assert_rank_in_v2", + "source_code": "@tf_export('debugging.assert_rank_in', v1=[])\n@dispatch.add_dispatch_support\ndef assert_rank_in_v2(x, ranks, message=None, name=None):\n return assert_rank_in(x=x, ranks=ranks, message=message, name=name)", + "docstring": "Assert that has a rank in . This Op checks that the rank of is in . If has a different rank, , as well as the shape of are printed, and is raised. Args: x: . ranks: of scalar objects. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to \"assert_rank_in\". Returns: Op raising unless rank of is in . If static checks determine has matching rank, a is returned. This can be used with inside of s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: does not have rank in , but the rank cannot be statically determined. ValueError: If static checks determine has mismatched rank.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py", + "ast_data": "FunctionDef name:assert_rank_in_v2 arg:x arg:ranks arg:message arg:name arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "__iter__", + "source_code": "def __iter__(self):\n _data, _mask = (self._data, self._mask)\n if _mask is nomask:\n yield from _data\n else:\n for d, m in zip(_data, _mask):\n if m:\n yield masked\n else:\n yield d", + "docstring": "Defines an iterator for mvoid", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Assign If Compare For Call If" + }, + { + "library": "tensorflow", + "name": "add_c_function", + "source_code": "def add_c_function(self, c_func):\n self.ensure_initialized()\n pywrap_tfe.TFE_ContextAddFunction(self._handle, c_func)", + "docstring": "Add a C API TF_Function to the context. Once added, the function (identified by its name) can be executed like any other operation. 
Args: c_func: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:add_c_function arg:self arg:c_func arguments arg arg Call Call" + }, + { + "library": "pytorch", + "name": "put", + "source_code": "def put(self, req: Any) -> None:\n if not self.alive():\n self.start()\n TuningProcess.send(req, self.write_pipe)", + "docstring": "Push a work item to the child process.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\autotune_process.py", + "ast_data": "FunctionDef name:put arg:self arg:req arguments arg arg If Call Call Call" + }, + { + "library": "matplotlib", + "name": "_split_code_at_show", + "source_code": "def _split_code_at_show(text, function_name):\n is_doctest = contains_doctest(text)\n if function_name is None:\n parts = []\n part = []\n for line in text.split('\\n'):\n if not is_doctest and line.startswith('plt.show(') or (is_doctest and line.strip() == '>>> plt.show()'):\n part.append(line)\n parts.append('\\n'.join(part))\n part = []\n else:\n part.append(line)\n if '\\n'.join(part).strip():\n parts.append('\\n'.join(part))\n else:\n parts = [text]\n return (is_doctest, parts)", + "docstring": "Split code at plt.show().", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\sphinxext\\plot_directive.py", + "ast_data": "FunctionDef name:_split_code_at_show arg:text arg:function_name arguments arg arg Assign Call If Compare Assign Assign For Call If BoolOp BoolOp Call BoolOp Compare Call Call Call Call Assign Call If Call Call Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "utilization", + "source_code": "def utilization(device: Optional[Union[Device, int]]=None) -> int:\n if not torch.version.hip:\n handle = _get_pynvml_handler(device)\n device = _get_nvml_device_index(device)\n handle = pynvml.nvmlDeviceGetHandleByIndex(device)\n return pynvml.nvmlDeviceGetUtilizationRates(handle).gpu\n else:\n return _get_amdsmi_utilization(device)", + "docstring": "Return the percent of time over the past sample period during which one or more kernels was executing on the GPU as given by . Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default). Warning: Each sample period may be between 1 second and 1/6 second, depending on the product being queried.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:utilization arg:device arguments arg If Assign Call Assign Call Assign Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "ReLU", + "source_code": "class ReLU(Module):\n __constants__ = ['inplace']\n inplace: bool\n\n def __init__(self, inplace: bool=False):\n super().__init__()\n self.inplace = inplace\n\n def forward(self, input: Tensor) -> Tensor:\n return F.relu(input, inplace=self.inplace)\n\n def extra_repr(self) -> str:\n inplace_str = 'inplace=True' if self.inplace else ''\n return inplace_str", + "docstring": "Applies the rectified linear unit function element-wise. :math: Args: inplace: can optionally do the operation in-place. Default: `(*)*(*)`, same shape as the input. .. 
image:: ../scripts/activation_images/ReLU.png Examples:: >>> m = nn.ReLU() >>> input = torch.randn(2) >>> output = m(input) An implementation of CReLU - >>> m = nn.ReLU() >>> input = torch.randn(2).unsqueeze(0) >>> output = torch.cat((m(input), m(-input)))", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\activation.py", + "ast_data": "ClassDef name:ReLU Assign FunctionDef name:__init__ arg:self arg:inplace arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes" + }, + { + "library": "pandas", + "name": "column_data_offsets", + "source_code": "def column_data_offsets(self) -> np.ndarray:\n return np.asarray(self._column_data_offsets, dtype=np.int64)", + "docstring": "Return a numpy int64 array of the column offsets", + "type": "method", + "file_path": "pandas\\pandas\\io\\sas\\sas7bdat.py", + "ast_data": "FunctionDef name:column_data_offsets arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_internal_get_values", + "source_code": "def _internal_get_values(self) -> ArrayLike:\n if needs_i8_conversion(self.categories.dtype):\n return self.categories.take(self._codes, fill_value=NaT)._values\n elif is_integer_dtype(self.categories.dtype) and -1 in self._codes:\n return self.categories.astype('object').take(self._codes, fill_value=np.nan)._values\n return np.array(self)", + "docstring": "Return the values. For internal compatibility with pandas formatting. Returns ------- np.ndarray or ExtensionArray A numpy array or ExtensionArray of the same dtype as categorical.categories.dtype.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:_internal_get_values arg:self arguments arg If Call Return return:yes Call If BoolOp Call Compare Return return:yes Call Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "generate_a_pyrex_source", + "source_code": "def generate_a_pyrex_source(self, base, ext_name, source, extension):\n return []", + "docstring": "Pyrex is not supported, but some projects monkeypatch this method. That allows compiling Cython code, see gh-6955. 
This method will remain here for compatibility reasons.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\command\\build_src.py", + "ast_data": "FunctionDef name:generate_a_pyrex_source arg:self arg:base arg:ext_name arg:source arg:extension arguments arg arg arg arg arg Return return:no" + }, + { + "library": "seaborn", + "name": "__call__", + "source_code": "def __call__(self, x1, x2=None, weights=None):\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)", + "docstring": "Count the occurrences in each bin, maybe normalize.", + "type": "method", + "file_path": "seaborn\\seaborn\\_statistics.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x1 arg:x2 arg:weights arguments arg arg arg arg If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "set_jac_params", + "source_code": "def set_jac_params(self, *args):\n self.jac_params = args\n return self", + "docstring": "Set extra parameters for user-supplied function jac.", + "type": "method", + "file_path": "scipy\\scipy\\integrate\\_ode.py", + "ast_data": "FunctionDef name:set_jac_params arg:self arguments arg arg Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "justknobs_getval_int", + "source_code": "def justknobs_getval_int(name: str) -> int:\n return 0", + "docstring": "Read warning on justknobs_check", + "type": "function", + "file_path": "pytorch\\torch\\_utils_internal.py", + "ast_data": "FunctionDef name:justknobs_getval_int arg:name arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_dense_var_to_tensor", + "source_code": "def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n if values_util.is_saving_non_distributed():\n return ops.convert_to_tensor(self._primary, dtype=dtype, name=name, as_ref=as_ref)\n with distribute_lib.enter_or_assert_strategy(self._distribute_strategy):\n replica_context = distribute_lib.get_replica_context()\n if replica_context is not None and distribute_lib.in_variable_sync_on_read_context():\n if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA:\n return ops.convert_to_tensor(self._get_replica(0), dtype=dtype, name=name, as_ref=as_ref)\n if self._aggregation == vs.VariableAggregation.SUM:\n values_util.mark_as_unsaveable()\n reduced = replica_context.strategy.extended._replica_ctx_all_reduce(reduce_util.ReduceOp.from_variable_aggregation(self._aggregation), self._get().read_value())\n return ops.convert_to_tensor(reduced, dtype=dtype, name=name, as_ref=as_ref)\n return ops.convert_to_tensor(self._get(), dtype=dtype, name=name, as_ref=as_ref)", + "docstring": "Converts a SyncOnReadVariable to a tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "FunctionDef name:_dense_var_to_tensor arg:self arg:dtype arg:name arg:as_ref arguments arg arg arg arg If Call Return return:yes Call With Call Assign Call If BoolOp Compare Call If Compare Return return:yes Call Call If Compare Call Assign Call Call Call Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "norm_diff", + "source_code": "def norm_diff(A, norm=2, msg=True, random_state=None):\n if msg:\n print('... computing %s norm ...' 
% norm)\n if norm == 2:\n v0 = _init_arpack_v0(min(A.shape), random_state)\n value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False, v0=v0)\n elif sp.sparse.issparse(A):\n value = sp.sparse.linalg.norm(A, ord=norm)\n else:\n value = sp.linalg.norm(A, ord=norm)\n return value", + "docstring": "Compute the norm diff with the original matrix, when randomized SVD is called with *params. norm: 2 => spectral; 'fro' => Frobenius", + "type": "function", + "file_path": "scikit-learn\\benchmarks\\bench_plot_randomized_svd.py", + "ast_data": "FunctionDef name:norm_diff arg:A arg:norm arg:msg arg:random_state arguments arg arg arg arg If Call If Compare Assign Call Call Assign Call If Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "channel_range", + "source_code": "def channel_range(input, axis=0):\n size_of_tensor_dim = input.ndim\n axis_list = list(range(size_of_tensor_dim))\n axis_list.remove(axis)\n mins = min_over_ndim(input, axis_list)\n maxs = max_over_ndim(input, axis_list)\n assert mins.size(0) == input.size(axis), 'Dimensions of resultant channel range does not match size of requested axis'\n return maxs - mins", + "docstring": "Find the range of weights associated with a specific channel.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\_equalize.py", + "ast_data": "FunctionDef name:channel_range arg:input arg:axis arguments arg arg Assign Assign Call Call Call Assign Call Assign Call Compare Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_compute_device", + "source_code": "def _get_compute_device(module: nn.Module, ignored_params: set[nn.Parameter], device_from_device_id: Optional[torch.device], rank: int, device_handle: _FSDPDeviceHandle) -> torch.device:\n param = next(_get_orig_params(module, ignored_params), None)\n if param is not None and param.device.type != 'cpu':\n compute_device = param.device\n else:\n compute_device = torch.device(device_handle.current_device())\n if device_from_device_id is not None and compute_device != device_from_device_id:\n raise ValueError(f'Inconsistent compute device and `device_id` on rank {rank}: {compute_device} vs {device_from_device_id}')\n return compute_device", + "docstring": "Determine and return this FSDP instance's compute device. If the module is already on a non-CPU device, then the compute device is that non-CPU device. If the module is on CPU, then the compute device is the current device. Since this method should be called after materializing the module, any non-CPU device should not be meta device. For now, the compute device is always a CUDA or CUDA-like device with its explicit index. 
Precondition: ``.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py", + "ast_data": "FunctionDef name:_get_compute_device arg:module arg:ignored_params arg:device_from_device_id arg:rank arg:device_handle arguments arg arg arg arg arg Assign Call Call If BoolOp Compare Compare Assign Assign Call Call If BoolOp Compare Compare Raise Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "attach", + "source_code": "def attach(self, point, callback, failsafe=None, priority=None, **kwargs):\n self[point].append(Hook(callback, failsafe, priority, **kwargs))", + "docstring": "Append a new Hook made from the supplied arguments.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cprequest.py", + "ast_data": "FunctionDef name:attach arg:self arg:point arg:callback arg:failsafe arg:priority arguments arg arg arg arg arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "colocate_vars_with", + "source_code": "def colocate_vars_with(self, colocate_with_variable):\n\n def create_colocated_variable(next_creator, **kwargs):\n _require_strategy_scope_extended(self)\n kwargs['use_resource'] = True\n kwargs['colocate_with'] = colocate_with_variable\n return next_creator(**kwargs)\n _require_strategy_scope_extended(self)\n self._validate_colocate_with_variable(colocate_with_variable)\n return variable_scope.variable_creator_scope(create_colocated_variable)", + "docstring": "Scope that controls which devices variables will be created on. No operations should be added to the graph inside this scope, it should only be used when creating variables (some implementations work by changing variable creation, others work by using a tf.compat.v1.colocate_with() scope). This may only be used inside . Example usage: Args: colocate_with_variable: A variable created in this strategy's . Variables created while in the returned context manager will be on the same set of devices as . Returns: A context manager.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:colocate_vars_with arg:self arg:colocate_with_variable arguments arg arg FunctionDef name:create_colocated_variable arg:next_creator arguments arg arg Call Assign Assign Return return:yes Call Call Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "log_request_headers", + "source_code": "def log_request_headers(debug=False):\n h = [' %s: %s' % (k, v) for k, v in cherrypy.serving.request.header_list]\n cherrypy.log('\\nRequest Headers:\\n' + '\\n'.join(h), 'HTTP')", + "docstring": "Write request headers to the cherrypy error log.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\cptools.py", + "ast_data": "FunctionDef name:log_request_headers arg:debug arguments arg Assign Call Call" + }, + { + "library": "tensorflow", + "name": "scatter_update", + "source_code": "def scatter_update(self, sparse_delta, use_locking=False, name=None):\n raise NotImplementedError", + "docstring": "Assigns to this variable. Args: sparse_delta: to be assigned to this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. 
Raises: TypeError: if is not an .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:scatter_update arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Raise" + }, + { + "library": "scikit-learn", + "name": "inverse_transform", + "source_code": "def inverse_transform(self, X):\n check_is_fitted(self)\n xp, _ = get_namespace(X)\n X = check_array(X, copy=self.copy, dtype=_array_api.supported_float_dtypes(xp), force_writeable=True, ensure_all_finite='allow-nan')\n X -= self.min_\n X /= self.scale_\n return X", + "docstring": "Undo the scaling of X according to feature_range. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data that will be transformed. It cannot be sparse. Returns ------- X_original : ndarray of shape (n_samples, n_features) Transformed data.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "decision_function", + "source_code": "def decision_function(self, X):\n return super().decision_function(X)", + "docstring": "Apply decision function to an array of samples. The decision function is equal (up to a constant factor) to the log-posterior of the model, i.e. . In a binary classification setting this instead corresponds to the difference . See :ref:. Parameters ---------- X : array-like of shape (n_samples, n_features) Array of samples (test vectors). Returns ------- y_scores : ndarray of shape (n_samples,) or (n_samples, n_classes) Decision function values related to each class, per sample. In the two-class case, the shape is , giving the log likelihood ratio of the positive class.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py", + "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "Poisson", + "source_code": "class Poisson(MeanMetricWrapper):\n\n def __init__(self, name='poisson', dtype=None):\n super(Poisson, self).__init__(poisson, name, dtype=dtype)", + "docstring": "Computes the Poisson metric between and . Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Poisson() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.49999997 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... 
sample_weight=[1, 0]) >>> m.result().numpy() 0.99999994 Usage with API:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "ClassDef name:Poisson FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "get_tensor_spec", + "source_code": "def get_tensor_spec(t, dynamic_batch=False, name=None):\n if isinstance(t, type_spec.TypeSpec):\n spec = t\n elif is_extension_type(t):\n spec = t._type_spec\n elif hasattr(t, '_keras_history') and hasattr(t._keras_history[0], '_type_spec'):\n return t._keras_history[0]._type_spec\n elif hasattr(t, 'shape') and hasattr(t, 'dtype'):\n spec = tensor_lib.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)\n else:\n return None\n if not dynamic_batch:\n return spec\n dynamic_batch_spec = copy.deepcopy(spec)\n shape = dynamic_batch_spec._shape\n if shape.rank is not None and shape.rank > 0:\n shape_list = shape.as_list()\n shape_list[0] = None\n dynamic_batch_spec._shape = tensor_shape.TensorShape(shape_list)\n return dynamic_batch_spec", + "docstring": "Returns a given a single or .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py", + "ast_data": "FunctionDef name:get_tensor_spec arg:t arg:dynamic_batch arg:name arguments arg arg arg If Call Assign If Call Assign If BoolOp Call Call Return return:yes If BoolOp Call Call Assign Call Return return:no If Return return:yes Assign Call Assign If BoolOp Compare Compare Assign Call Assign Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_get_nonzero_slices", + "source_code": "def _get_nonzero_slices(buf):\n x_nz, = buf.any(axis=0).nonzero()\n y_nz, = buf.any(axis=1).nonzero()\n if len(x_nz) and len(y_nz):\n l, r = x_nz[[0, -1]]\n b, t = y_nz[[0, -1]]\n return (slice(b, t + 1), slice(l, r + 1))\n else:\n return (slice(0, 0), slice(0, 0))", + "docstring": "Return the bounds of the nonzero region of a 2D array as a pair of slices. `` is returned.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:_get_nonzero_slices arg:buf arguments arg Assign Call Call Assign Call Call If BoolOp Call Call Assign Assign Return return:yes Call Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "geom", + "source_code": "@property\ndef geom(self):\n geom_ptr = capi.get_feat_geom_ref(self.ptr)\n return OGRGeometry(geom_api.clone_geom(geom_ptr))", + "docstring": "Return the OGR Geometry for this Feature.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py", + "ast_data": "FunctionDef name:geom arg:self arguments arg Assign Call Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "parse_authorization_response", + "source_code": "def parse_authorization_response(self, url):\n token = dict(url_decode(urlparse.urlparse(url).query))\n self.token = token\n return token", + "docstring": "Extract parameters from the post authorization redirect response URL. :param url: The full URL that resulted from the user being redirected back from the OAuth provider to you, the client. 
:returns: A dict of parameters extracted from the URL.", + "type": "method", + "file_path": "authlib\\authlib\\oauth1\\client.py", + "ast_data": "FunctionDef name:parse_authorization_response arg:self arg:url arguments arg arg Assign Call Call Call Assign Return return:yes" + }, + { + "library": "kornia", + "name": "init_ray_dataset", + "source_code": "def init_ray_dataset(self, num_img_rays: Optional[Tensor]=None) -> None:\n if num_img_rays is None:\n self._init_uniform_ray_dataset()\n else:\n self._init_random_ray_dataset(num_img_rays)", + "docstring": "Initialize a ray dataset. Args: num_img_rays: If not None, number of rays to randomly cast from each camera: math: .", + "type": "method", + "file_path": "kornia\\kornia\\nerf\\data_utils.py", + "ast_data": "FunctionDef name:init_ray_dataset arg:self arg:num_img_rays arguments arg arg If Compare Call Call" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, size: tuple[int, int]) -> Tensor:\n h, w = size\n _dev = self.positional_encoding_gaussian_matrix.device\n device = _dev if isinstance(_dev, torch.device) else None\n grid = torch.ones((h, w), device=device, dtype=torch.float32)\n y_embed = grid.cumsum(dim=0) - 0.5\n x_embed = grid.cumsum(dim=1) - 0.5\n y_embed = y_embed / h\n x_embed = x_embed / w\n pe = self._pe_encoding(stack([x_embed, y_embed], dim=-1))\n return pe.permute(2, 0, 1)", + "docstring": "Generate positional encoding for a grid of the specified size.", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\prompt_encoder.py", + "ast_data": "FunctionDef name:forward arg:self arg:size arguments arg arg Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "convert_json_field_to_pandas_type", + "source_code": "def convert_json_field_to_pandas_type(field) -> str | CategoricalDtype:\n typ = field['type']\n if typ == 'string':\n return 'object'\n elif typ == 'integer':\n return field.get('extDtype', 'int64')\n elif typ == 'number':\n return field.get('extDtype', 'float64')\n elif typ == 'boolean':\n return field.get('extDtype', 'bool')\n elif typ == 'duration':\n return 'timedelta64'\n elif typ == 'datetime':\n if field.get('tz'):\n return f'datetime64[ns, {field['tz']}]'\n elif field.get('freq'):\n offset = to_offset(field['freq'])\n freq = PeriodDtype(offset)._freqstr\n return f'period[{freq}]'\n else:\n return 'datetime64[ns]'\n elif typ == 'any':\n if 'constraints' in field and 'ordered' in field:\n return CategoricalDtype(categories=field['constraints']['enum'], ordered=field['ordered'])\n elif 'extDtype' in field:\n return registry.find(field['extDtype'])\n else:\n return 'object'\n raise ValueError(f'Unsupported or invalid field type: {typ}')", + "docstring": "Converts a JSON field descriptor into its corresponding NumPy / pandas type Parameters ---------- field A JSON field descriptor Returns ------- dtype Raises ------ ValueError If the type of the provided field is unknown or currently unsupported Examples -------- >>> convert_json_field_to_pandas_type({\"name\": \"an_int\", \"type\": \"integer\"}) 'int64' >>> convert_json_field_to_pandas_type( ... { ... \"name\": \"a_categorical\", ... \"type\": \"any\", ... \"constraints\": {\"enum\": [\"a\", \"b\", \"c\"]}, ... \"ordered\": True, ... } ... 
) CategoricalDtype(categories=['a', 'b', 'c'], ordered=True, categories_dtype=object) >>> convert_json_field_to_pandas_type({\"name\": \"a_datetime\", \"type\": \"datetime\"}) 'datetime64[ns]' >>> convert_json_field_to_pandas_type( ... {\"name\": \"a_datetime_with_tz\", \"type\": \"datetime\", \"tz\": \"US/Central\"} ... ) 'datetime64[ns, US/Central]'", + "type": "function", + "file_path": "pandas\\pandas\\io\\json\\_table_schema.py", + "ast_data": "FunctionDef name:convert_json_field_to_pandas_type arg:field arguments arg Assign If Compare Return return:yes If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes If Compare If Call Return return:yes If Call Assign Call Assign Call Return return:yes Return return:yes If Compare If BoolOp Compare Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "DistributedVariableTraceType", + "source_code": "class DistributedVariableTraceType(trace.TraceType):\n\n def __init__(self, distributed_variable):\n self.distributed_variable = distributed_variable\n self.components = (tuple(distributed_variable.shape.as_list()), distributed_variable.dtype)\n\n def is_subtype_of(self, other):\n return self == other\n\n def most_specific_common_supertype(self, others):\n return self if all((self == other for other in others)) else None\n\n def placeholder_value(self, placeholder_context=None):\n return self.distributed_variable\n\n def to_tensors(self, value):\n return []\n\n def cast(self, value, _):\n return value\n\n def __hash__(self) -> int:\n return hash(self.components)\n\n def __eq__(self, other) -> bool:\n if not isinstance(other, DistributedVariableTraceType):\n return False\n return self.components == other.components", + "docstring": "TraceType of DistributedVariable objects.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "ClassDef name:DistributedVariableTraceType FunctionDef name:__init__ arg:self arg:distributed_variable arguments arg arg Assign Assign Call Call FunctionDef name:is_subtype_of arg:self arg:other arguments arg arg Return return:yes Compare FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg Return return:yes Call Compare FunctionDef name:placeholder_value arg:self arg:placeholder_context arguments arg arg Return return:yes FunctionDef name:to_tensors arg:self arg:value arguments arg arg Return return:no FunctionDef name:cast arg:self arg:value arg:_ arguments arg arg arg Return return:yes FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Compare" + }, + { + "library": "matplotlib", + "name": "is_math_text", + "source_code": "def is_math_text(s):\n s = str(s)\n dollar_count = s.count('$') - s.count('\\\\$')\n even_dollars = dollar_count > 0 and dollar_count % 2 == 0\n return even_dollars", + "docstring": "Return whether the string *s* contains math expressions. 
This is done by checking whether *s* contains an even number of non-escaped dollar signs.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:is_math_text arg:s arguments arg Assign Call Assign Call Call Assign BoolOp Compare Compare Return return:yes" + }, + { + "library": "scipy", + "name": "estimate_spectral_norm", + "source_code": "def estimate_spectral_norm(A, its=20, rng=None):\n from scipy.sparse.linalg import aslinearoperator\n rng = np.random.default_rng(rng)\n A = aslinearoperator(A)\n if _is_real(A):\n return _backend.idd_snorm(A, its=its, rng=rng)\n else:\n return _backend.idz_snorm(A, its=its, rng=rng)", + "docstring": "Estimate spectral norm of a matrix by the randomized power method. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func: and :func:. Parameters ---------- A : :class: Matrix given as a :class: with the and methods (to apply the matrix and its adjoint). its : int, optional Number of power method iterations. rng : , optional Pseudorandom number generator state. When is None, a new is created using entropy from the operating system. Types other than are passed to to instantiate a `rand`, the argument is ignored. Returns ------- float Spectral norm estimate.", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\interpolative.py", + "ast_data": "FunctionDef name:estimate_spectral_norm arg:A arg:its arg:rng arguments arg arg arg Assign Call Assign Call If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "shift", + "source_code": "def shift(x, a, period=None, _cache=_cache):\n if isinstance(_cache, threading.local):\n if not hasattr(_cache, 'shift_cache'):\n _cache.shift_cache = {}\n _cache = _cache.shift_cache\n tmp = asarray(x)\n if iscomplexobj(tmp):\n return shift(tmp.real, a, period, _cache) + 1j * shift(tmp.imag, a, period, _cache)\n if period is not None:\n a = a * 2 * pi / period\n n = len(x)\n omega = _cache.get((n, a))\n if omega is None:\n if len(_cache) > 20:\n while _cache:\n _cache.popitem()\n\n def kernel_real(k, a=a):\n return cos(a * k)\n\n def kernel_imag(k, a=a):\n return sin(a * k)\n omega_real = convolve.init_convolution_kernel(n, kernel_real, d=0, zero_nyquist=0)\n omega_imag = convolve.init_convolution_kernel(n, kernel_imag, d=1, zero_nyquist=0)\n _cache[n, a] = (omega_real, omega_imag)\n else:\n omega_real, omega_imag = omega\n overwrite_x = _datacopied(tmp, x)\n return convolve.convolve_z(tmp, omega_real, omega_imag, overwrite_x=overwrite_x)", + "docstring": "Shift periodic sequence x by a: y(u) = x(u+a). If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = exp(j*a*2*pi/period*sqrt(-1)) * x_f Parameters ---------- x : array_like The array to take the pseudo-derivative from. a : float Defines the parameters of the sinh/sinh pseudo-differential period : float, optional The period of the sequences x and y. 
Default period is ``.", + "type": "function", + "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py", + "ast_data": "FunctionDef name:shift arg:x arg:a arg:period arg:_cache arguments arg arg arg arg If Call If Call Assign Assign Assign Call If Call Return return:yes Call Call If Compare Assign Assign Call Assign Call If Compare If Compare Call While Call FunctionDef name:kernel_real arg:k arg:a arguments arg arg Return return:yes Call FunctionDef name:kernel_imag arg:k arg:a arguments arg arg Return return:yes Call Assign Call Assign Call Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "get_locator", + "source_code": "def get_locator(self, dmin, dmax):\n tot_sec = (dmax - dmin).total_seconds()\n if abs(tot_sec) < self.minticks:\n self._freq = -1\n locator = MilliSecondLocator(self.tz)\n locator.set_axis(self.axis)\n locator.axis.set_view_interval(*self.axis.get_view_interval())\n locator.axis.set_data_interval(*self.axis.get_data_interval())\n return locator\n return mdates.AutoDateLocator.get_locator(self, dmin, dmax)", + "docstring": "Pick the best locator based on a distance.", + "type": "method", + "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py", + "ast_data": "FunctionDef name:get_locator arg:self arg:dmin arg:dmax arguments arg arg arg Assign Call If Compare Call Assign Assign Call Call Call Call Call Call Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "build_attrs", + "source_code": "def build_attrs(self, base_attrs, extra_attrs=None):\n return {**base_attrs, **(extra_attrs or {})}", + "docstring": "Build an attribute dictionary.", + "type": "method", + "file_path": "django\\django\\forms\\widgets.py", + "ast_data": "FunctionDef name:build_attrs arg:self arg:base_attrs arg:extra_attrs arguments arg arg arg Return return:yes BoolOp" + }, + { + "library": "pytorch", + "name": "prune", + "source_code": "def prune(self, t, default_mask=None, importance_scores=None):\n if importance_scores is not None:\n assert importance_scores.shape == t.shape, 'importance_scores should have the same shape as tensor t'\n else:\n importance_scores = t\n default_mask = default_mask if default_mask is not None else torch.ones_like(t)\n return t * self.compute_mask(importance_scores, default_mask=default_mask)", + "docstring": "Compute and returns a pruned version of input tensor `compute_mask`.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\utils\\prune.py", + "ast_data": "FunctionDef name:prune arg:self arg:t arg:default_mask arg:importance_scores arguments arg arg arg arg If Compare Compare Assign Assign Compare Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "buf_accesses", + "source_code": "@cache_on_self\ndef buf_accesses(self) -> dict[str, list[Dep]]:\n buf_accesses = collections.defaultdict(list)\n for node in self.scheduler_nodes():\n for access in node.read_writes.reads | node.read_writes.writes:\n buf_accesses[access.name].append(access)\n return buf_accesses", + "docstring": "only needed for config.benchmark_kernel", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\simd_kernel_features.py", + "ast_data": "FunctionDef name:buf_accesses arg:self arguments arg Assign Call For Call For Call Return return:yes" + }, + { + "library": "pandas", + "name": "_convert_to_style_kwargs", + "source_code": "@classmethod\ndef _convert_to_style_kwargs(cls, style_dict: dict) -> dict[str, Serialisable]:\n _style_key_map = {'borders': 'border'}\n 
style_kwargs: dict[str, Serialisable] = {}\n for k, v in style_dict.items():\n k = _style_key_map.get(k, k)\n _conv_to_x = getattr(cls, f'_convert_to_{k}', lambda x: None)\n new_v = _conv_to_x(v)\n if new_v:\n style_kwargs[k] = new_v\n return style_kwargs", + "docstring": "Convert a style_dict to a set of kwargs suitable for initializing or updating-on-copy an openpyxl v2 style object. Parameters ---------- style_dict : dict A dict with zero or more of the following keys (or their synonyms). 'font' 'fill' 'border' ('borders') 'alignment' 'number_format' 'protection' Returns ------- style_kwargs : dict A dict with the same, normalized keys as `` but each value has been replaced with a native openpyxl style object of the appropriate class.", + "type": "method", + "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py", + "ast_data": "FunctionDef name:_convert_to_style_kwargs arg:cls arg:style_dict arguments arg arg Assign For Call Assign Call Assign Call arguments arg Assign Call If Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_TestBenchmarkRequest", + "source_code": "class _TestBenchmarkRequest(BenchmarkRequest):\n\n def __init__(self, result: float=0.0, device: Optional[int]=None, sleep: Optional[float]=None, exc: Optional[Exception]=None, crash: bool=False):\n self.result = result\n self.device = device\n self.sleep = sleep\n self.exc = exc\n self.crash = crash\n\n def benchmark(self, *input_tensors: torch.Tensor, out: Optional[torch.Tensor]=None) -> float:\n if self.device is not None:\n assert os.environ.get(CUDA_VISIBLE_DEVICES, None) == str(self.device)\n if self.sleep:\n time.sleep(self.sleep)\n if self.exc:\n raise self.exc\n if self.crash:\n sys.exit(1)\n return self.result", + "docstring": "Supports unit testing. Defined in this file instead of the test file so the TuningProcess sub-process can unpickle these objects.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\autotune_process.py", + "ast_data": "ClassDef name:_TestBenchmarkRequest FunctionDef name:__init__ arg:self arg:result arg:device arg:sleep arg:exc arg:crash arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:benchmark arg:self arguments arg arg arg If Compare Compare Call Call If Call If Raise If Call Return return:yes" + }, + { + "library": "scipy", + "name": "__add__", + "source_code": "def __add__(self, other):\n if not self._check_binop_other(other):\n return NotImplemented\n if isinstance(other, StateSpace):\n if type(other) is not type(self):\n raise TypeError(f'Cannot add {type(self)} and {type(other)}')\n if self.dt != other.dt:\n raise TypeError('Cannot add systems with different `dt`.')\n a = linalg.block_diag(self.A, other.A)\n b = np.vstack((self.B, other.B))\n c = np.hstack((self.C, other.C))\n d = self.D + other.D\n else:\n other = np.atleast_2d(other)\n if self.D.shape == other.shape:\n a = self.A\n b = self.B\n c = self.C\n d = self.D + other\n else:\n raise ValueError(f'Cannot add systems with incompatible dimensions ({self.D.shape} and {other.shape})')\n common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype)\n return StateSpace(np.asarray(a, dtype=common_dtype), np.asarray(b, dtype=common_dtype), np.asarray(c, dtype=common_dtype), np.asarray(d, dtype=common_dtype), **self._dt_dict)", + "docstring": "Adds two systems in the sense of frequency domain addition.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:__add__ arg:self arg:other arguments arg arg If Call 
Return return:yes If Call If Compare Call Call Raise Call Call Call If Compare Raise Call Assign Call Assign Call Assign Call Assign Assign Call If Compare Assign Assign Assign Assign Raise Call Assign Call Return return:yes Call Call Call Call Call" + }, + { + "library": "pandas", + "name": "_isnan", + "source_code": "@property\ndef _isnan(self) -> npt.NDArray[np.bool_]:\n return self.asi8 == iNaT", + "docstring": "return if each value is nan", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:_isnan arg:self arguments arg Return return:yes Compare" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, logits: Tensor, boxes: Tensor, original_sizes: Tensor) -> Union[Tensor, list[Tensor]]:\n cxcy, wh = (boxes[..., :2], boxes[..., 2:])\n boxes_xy = concatenate([cxcy - wh * 0.5, wh], -1)\n sizes_wh = original_sizes[0].flip(0).unsqueeze(0).unsqueeze(0).repeat(1, 1, 2)\n boxes_xy = boxes_xy * sizes_wh\n scores = logits.sigmoid()\n scores, index = torch.topk(scores.flatten(1), self.num_top_queries, dim=-1)\n labels = mod(index, self.num_classes)\n index = index // self.num_classes\n boxes = boxes_xy.gather(dim=1, index=index.unsqueeze(-1).repeat(1, 1, boxes_xy.shape[-1]))\n all_boxes = concatenate([labels[..., None], scores[..., None], boxes], -1)\n if not self.confidence_filtering or self.confidence_threshold == 0:\n return all_boxes\n return self.box_filtering(all_boxes, self.confidence_threshold)", + "docstring": "Post-process outputs from DETR. Args: logits: tensor with shape :math:, where :math: is the batch size, :math: is the number of queries, :math: is the number of classes. boxes: tensor with shape :math:, where :math: is the batch size, :math: is the number of queries. original_sizes: tensor with shape :math:, where :math: is the batch size and each element represents the image size of (img_height, img_width). Returns: Processed detections. For each image, the detections have shape (D, 6), where D is the number of detections in that image, 6 represent (class_id, confidence_score, x, y, w, h).", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\post_processor.py", + "ast_data": "FunctionDef name:forward arg:self arg:logits arg:boxes arg:original_sizes arguments arg arg arg arg Assign Assign Call Assign Call Call Call Call Assign Assign Call Assign Call Call Assign Call Assign Assign Call Call Call Assign Call If BoolOp Compare Return return:yes Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "get_estimator_path", + "source_code": "def get_estimator_path(benchmark, directory, params, save=False):\n path = Path(__file__).resolve().parent / 'cache'\n path = path / 'estimators' / directory if save else path / 'tmp'\n filename = benchmark.__class__.__name__ + '_estimator_' + '_'.join(list(map(str, params))) + '.pkl'\n return path / filename", + "docstring": "Get path of pickled fitted estimator", + "type": "function", + "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\common.py", + "ast_data": "FunctionDef name:get_estimator_path arg:benchmark arg:directory arg:params arg:save arguments arg arg arg arg Assign Call Call Assign Assign Call Call Call Return return:yes" + }, + { + "library": "scrapy", + "name": "open", + "source_code": "def open(self, spider: Spider) -> IO[bytes]:\n pass", + "docstring": "Open the storage for the given spider. 
It must return a file-like object that will be used for the exporters", + "type": "method", + "file_path": "scrapy\\scrapy\\extensions\\feedexport.py", + "ast_data": "FunctionDef name:open arg:self arg:spider arguments arg arg" + }, + { + "library": "pytorch", + "name": "as_tuple", + "source_code": "def as_tuple(self, flatten: bool=True):\n if flatten:\n block_size = (self.BLOCK_SIZE[0], self.BLOCK_SIZE[1])\n seq_lengths = (self.seq_lengths[0], self.seq_lengths[1])\n else:\n block_size = (self.BLOCK_SIZE,)\n seq_lengths = (self.seq_lengths,)\n return (*seq_lengths, self.kv_num_blocks, self.kv_indices, self.full_kv_num_blocks, self.full_kv_indices, self.q_num_blocks, self.q_indices, self.full_q_num_blocks, self.full_q_indices, *block_size, self.mask_mod)", + "docstring": "Returns a tuple of the attributes of the BlockMask. Args: flatten (bool): If True, it will flatten the tuple of (KV_BLOCK_SIZE, Q_BLOCK_SIZE)", + "type": "method", + "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py", + "ast_data": "FunctionDef name:as_tuple arg:self arg:flatten arguments arg arg If Assign Assign Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "set_key_value", + "source_code": "def set_key_value(self, metric: str, key: str, value: Any) -> None:\n if self._level == 0:\n raise RuntimeError(f'Cannot set {metric} outside of a MetricsContext')\n if metric not in self._metrics:\n self._metrics[metric] = {}\n self._metrics[metric][key] = value", + "docstring": "Treats a give metric as a dictionary and set the k and value within it. Note that the metric must be a dictionary or not present. We allow this to be called multiple times (i.e. for features, it's not uncommon for them to be used multiple times within a single compilation).", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py", + "ast_data": "FunctionDef name:set_key_value arg:self arg:metric arg:key arg:value arguments arg arg arg arg If Compare Raise Call If Compare Assign Assign" + }, + { + "library": "pytorch", + "name": "DEFAULT_DEVICE", + "source_code": "def DEFAULT_DEVICE(self, guard: Guard):\n assert guard.source is GuardSource.GLOBAL\n code = [f'utils_device.CURRENT_DEVICE == {self.check_fn_manager.output_graph.current_device!r}']\n self._set_guard_export_info(guard, code)\n self.get_guard_manager(guard).add_default_device_guard(get_verbose_code_parts(code, guard))", + "docstring": "Guard on CURRENT_DEVICE per torch.utils._device", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\guards.py", + "ast_data": "FunctionDef name:DEFAULT_DEVICE arg:self arg:guard arguments arg arg Compare Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_parse_grad_debug_op_name", + "source_code": "def _parse_grad_debug_op_name(op_name):\n name_items = op_name.split('/')\n assert len(name_items) > 1\n assert name_items[-1].startswith(_GRADIENT_DEBUG_TAG)\n grad_debugger_uuid = name_items[-1][len(_GRADIENT_DEBUG_TAG):]\n if '_' in grad_debugger_uuid:\n grad_debugger_uuid = grad_debugger_uuid[:grad_debugger_uuid.index('_')]\n orig_tensor_slot = int(name_items[-2][name_items[-2].rfind('_') + 1:])\n orig_base_op_name = name_items[-2][:name_items[-2].rfind('_')]\n orig_tensor_name = '/'.join(name_items[:-2] + [orig_base_op_name]) + ':%d' % orig_tensor_slot\n return (grad_debugger_uuid, orig_tensor_name)", + "docstring": "Parse the name of a debug gradient op. Args: op_name: the name of the debug gradient op. 
Returns: 1) The UUID of the GradientsDebugger that created the debug gradient op. 2) Name of the original tensor whose gradient is debugged by the debug gradient op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_gradients.py", + "ast_data": "FunctionDef name:_parse_grad_debug_op_name arg:op_name arguments arg Assign Call Compare Call Call Assign Call If Compare Assign Call Assign Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_parse_error_message", + "source_code": "def _parse_error_message(self, message):\n error_code_mapping = {'Failed to functionalize Control Flow V1 ops. Consider using Control Flow V2 ops instead. See https://www.tensorflow.org/api_docs/python/tf/compat/v1/enable_control_flow_v2.': converter_error_data_pb2.ConverterErrorData.ERROR_UNSUPPORTED_CONTROL_FLOW_V1}\n for pattern, error_code in error_code_mapping.items():\n if pattern in message:\n error_data = converter_error_data_pb2.ConverterErrorData()\n error_data.error_message = message\n error_data.error_code = error_code\n self.append_error(error_data)\n return", + "docstring": "If the message matches a pattern, assigns the associated error code. It is difficult to assign an error code to some errrors in MLIR side, Ex: errors thrown by other components than TFLite or not using mlir::emitError. This function try to detect them by the error message and assign the corresponding error code. Args: message: The error message of this exception.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\convert_phase.py", + "ast_data": "FunctionDef name:_parse_error_message arg:self arg:message arguments arg arg Assign For Call If Compare Assign Call Assign Assign Call Return return:no" + }, + { + "library": "tensorflow", + "name": "generate", + "source_code": "@tf_export('__internal__.distribute.combinations.generate', v1=[])\ndef generate(combinations, test_combinations=()):\n default_combinations = (framework_combinations.EagerGraphCombination(), framework_combinations.TFVersionCombination(), ClusterCombination(), DistributionCombination(), GPUCombination(), TPUCombination())\n combination_decorator = combinations_lib.generate(combinations, test_combinations=default_combinations + test_combinations)\n\n def decorator(test_method_or_class):\n if isinstance(test_method_or_class, type):\n class_object = test_method_or_class\n for name, test_method in six.iteritems(class_object.__dict__.copy()):\n if name.startswith(unittest.TestLoader.testMethodPrefix) and isinstance(test_method, types.FunctionType):\n setattr(class_object, name, _multi_worker_test(test_method))\n return combination_decorator(class_object)\n else:\n return combination_decorator(_multi_worker_test(test_method_or_class))\n return decorator", + "docstring": "Distributed adapter of . All tests with distributed strategy should use this one instead of . This function has support of strategy combinations, GPU/TPU and multi worker support. 
See for usage.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py", + "ast_data": "FunctionDef name:generate arg:combinations arg:test_combinations arguments arg arg Assign Call Call Call Call Call Call Assign Call FunctionDef name:decorator arg:test_method_or_class arguments arg If Call Assign For Call Call If BoolOp Call Call Call Call Return return:yes Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "unimplemented_v2", + "source_code": "def unimplemented_v2(gb_type: str, context: str, explanation: str, hints: list[str], *, from_exc: Any=_NOTHING, log_warning: bool=False) -> NoReturn:\n msg = format_graph_break_message(gb_type, context, explanation, hints)\n if log_warning:\n log.warning(msg)\n if from_exc is not _NOTHING:\n raise Unsupported(msg) from from_exc\n raise Unsupported(msg)", + "docstring": "Called within dynamo to cause a graph break. Args: gb_type: Context-free graph break type. It should be a short string without any information specific to the tracing context (i.e. no dynamically-generated strings) context: Developer context for the graph break. It can contain tracing context/dynamic strings. explanation: User-facing context-dependent explanation for the graph break. Can be dynamic. hints: List of user-facing hints for the graph break.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\exc.py", + "ast_data": "FunctionDef name:unimplemented_v2 arg:gb_type arg:context arg:explanation arg:hints arguments arg arg arg arg arg arg Assign Call If Call If Compare Raise Call Raise Call" + }, + { + "library": "scipy", + "name": "freqresp", + "source_code": "def freqresp(self, w=None, n=10000, whole=False):\n return dfreqresp(self, w=w, n=n, whole=whole)", + "docstring": "Calculate the frequency response of a discrete-time system. Returns a 2-tuple containing arrays of frequencies [rad/s] and complex magnitude. See for details.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:freqresp arg:self arg:w arg:n arg:whole arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "__str__", + "source_code": "def __str__(self):\n iso_formatted_now = datetime.datetime.now(datetime.timezone.utc).isoformat('T')\n return f'{iso_formatted_now!s}Z'", + "docstring": "Return datetime in RFC3339 UTC Format.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cplogging.py", + "ast_data": "FunctionDef name:__str__ arg:self arguments arg Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "piecewise_constant", + "source_code": "@tf_export(v1=['train.piecewise_constant_decay', 'train.piecewise_constant'])\ndef piecewise_constant(x, boundaries, values, name=None):\n boundaries = nest.map_structure(tensor_conversion.convert_to_tensor_v2_with_dispatch, nest.flatten(boundaries))\n values = nest.map_structure(tensor_conversion.convert_to_tensor_v2_with_dispatch, nest.flatten(values))\n x_recomp = tensor_conversion.convert_to_tensor_v2_with_dispatch(x)\n for i, b in enumerate(boundaries):\n if b.dtype.base_dtype != x_recomp.dtype.base_dtype:\n if b.dtype.base_dtype == dtypes.int32 and x_recomp.dtype.base_dtype == dtypes.int64:\n b = math_ops.cast(b, x_recomp.dtype.base_dtype)\n boundaries[i] = b\n else:\n raise ValueError('Boundaries (%s) must have the same dtype as x (%s).' 
% (b.dtype.base_dtype, x_recomp.dtype.base_dtype))\n for v in values[1:]:\n if v.dtype.base_dtype != values[0].dtype.base_dtype:\n raise ValueError('Values must have elements all with the same dtype (%s vs %s).' % (values[0].dtype.base_dtype, v.dtype.base_dtype))\n decayed_lr = learning_rate_schedule.PiecewiseConstantDecay(boundaries, values, name=name)\n if not context.executing_eagerly():\n decayed_lr = decayed_lr(x)\n else:\n decayed_lr = functools.partial(decayed_lr, x)\n return decayed_lr", + "docstring": "Piecewise constant from boundaries and interval values. Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5 for the next 10000 steps, and 0.1 for any additional steps. Args: x: A 0-D scalar . Must be one of the following types: , , , , , , . boundaries: A list of s or s or s with strictly increasing entries, and with all elements having the same type as . values: A list of s or s or s that specifies the values for the intervals defined by . It should have one more element than , and all elements should have the same type. name: A string. Optional name of the operation. Defaults to 'PiecewiseConstant'. Returns: A 0-D Tensor. Its value is when and . Raises: ValueError: if types of and do not match, or types of all do not match or the number of elements in the lists does not match. @compatibility(eager) When eager execution is enabled, this function returns a function which in turn returns the decayed learning rate Tensor. This can be useful for changing the learning rate value across different invocations of optimizer functions. @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\legacy_learning_rate_decay.py", + "ast_data": "FunctionDef name:piecewise_constant arg:x arg:boundaries arg:values arg:name arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call For Call If Compare If BoolOp Compare Compare Assign Call Assign Raise Call For If Compare Raise Call Assign Call If Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "transpose_homogeneous_masked_arrays", + "source_code": "def transpose_homogeneous_masked_arrays(masked_arrays: Sequence[BaseMaskedArray]) -> list[BaseMaskedArray]:\n masked_arrays = list(masked_arrays)\n dtype = masked_arrays[0].dtype\n values = [arr._data.reshape(1, -1) for arr in masked_arrays]\n transposed_values = np.concatenate(values, axis=0, out=np.empty((len(masked_arrays), len(masked_arrays[0])), order='F', dtype=dtype.numpy_dtype))\n masks = [arr._mask.reshape(1, -1) for arr in masked_arrays]\n transposed_masks = np.concatenate(masks, axis=0, out=np.empty_like(transposed_values, dtype=bool))\n arr_type = dtype.construct_array_type()\n transposed_arrays: list[BaseMaskedArray] = []\n for i in range(transposed_values.shape[1]):\n transposed_arr = arr_type(transposed_values[:, i], mask=transposed_masks[:, i])\n transposed_arrays.append(transposed_arr)\n return transposed_arrays", + "docstring": "Transpose masked arrays in a list, but faster. Input should be a list of 1-dim masked arrays of equal length and all have the same dtype. 
The caller is responsible for ensuring validity of input data.", + "type": "function", + "file_path": "pandas\\pandas\\core\\arrays\\masked.py", + "ast_data": "FunctionDef name:transpose_homogeneous_masked_arrays arg:masked_arrays arguments arg Assign Call Assign Assign Call Assign Call Call Call Call Assign Call Assign Call Call Assign Call For Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "create", + "source_code": "@staticmethod\ndef create(rpc_layer, address):\n if rpc_layer != 'grpc':\n raise ValueError('Only GRPC backend is supported at the moment.')\n return GrpcServer(address=address)", + "docstring": "Create TF RPC server at given address. Args: rpc_layer: Communication layer between client and server. Only \"grpc\" rpc layer is supported at the moment. address: Address where RPC server is hosted. Returns: An instance of class. Raises: A ValueError if rpc_layer other than \"grpc\" is used. Only GRPC is supported at the moment. Example usage: >>> import portpicker >>> @tf.function(input_signature=[ ... tf.TensorSpec([], tf.int32), ... tf.TensorSpec([], tf.int32)]) ... def remote_fn(a, b): ... return tf.add(a, b) >>> port = portpicker.pick_unused_port() >>> address = \"localhost:{}\".format(port) >>> server = tf.distribute.experimental.rpc.Server.create(\"grpc\", address) >>> server.register(\"addition\", remote_fn) >>> server.start()", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\rpc\\rpc_ops.py", + "ast_data": "FunctionDef name:create arg:rpc_layer arg:address arguments arg arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_add_scope_exit_callback", + "source_code": "def _add_scope_exit_callback(self, fn):\n if not callable(fn):\n raise TypeError('fn is not callable: {}'.format(fn))\n if self._scope_exit_callbacks is None:\n raise RuntimeError(\"Attempting to add a scope exit callback, but the default graph is not the context scope graph. 
Did you forget to call 'with graph.as_default(): ...'?\")\n self._scope_exit_callbacks.append(fn)", + "docstring": "Add a function to call when this graph exits the default scope.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py", + "ast_data": "FunctionDef name:_add_scope_exit_callback arg:self arg:fn arguments arg arg If Call Raise Call Call If Compare Raise Call Call" + }, + { + "library": "scipy", + "name": "_ttest_finish", + "source_code": "def _ttest_finish(df, t, alternative):\n if alternative == 'less':\n pval = special._ufuncs.stdtr(df, t)\n elif alternative == 'greater':\n pval = special._ufuncs.stdtr(df, -t)\n elif alternative == 'two-sided':\n pval = special._ufuncs.stdtr(df, -np.abs(t)) * 2\n else:\n raise ValueError(\"alternative must be 'less', 'greater' or 'two-sided'\")\n if t.ndim == 0:\n t = t[()]\n if pval.ndim == 0:\n pval = pval[()]\n return (t, pval)", + "docstring": "Common code between all 3 t-test functions.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", + "ast_data": "FunctionDef name:_ttest_finish arg:df arg:t arg:alternative arguments arg arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call Call Raise Call If Compare Assign If Compare Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "fedit", + "source_code": "def fedit(data, title='', comment='', icon=None, parent=None, apply=None):\n if QtWidgets.QApplication.startingUp():\n _app = QtWidgets.QApplication([])\n dialog = FormDialog(data, title, comment, icon, parent, apply)\n if parent is not None:\n if hasattr(parent, '_fedit_dialog'):\n parent._fedit_dialog.close()\n parent._fedit_dialog = dialog\n dialog.show()", + "docstring": "Create form dialog data: datalist, datagroup title: str comment: str icon: QIcon instance parent: parent QWidget apply: apply callback (function) datalist: list/tuple of (field_name, field_value) datagroup: list/tuple of (datalist *or* datagroup, title, comment) -> one field for each member of a datalist -> one tab for each member of a top-level datagroup -> one page (of a multipage widget, each page can be selected with a combo box) for each member of a datagroup inside a datagroup Supported types for field_value: - int, float, str, bool - colors: in Qt-compatible text form, i.e. in hex format or name (red, ...) 
(automatically detected from a string) - list/tuple: * the first element will be the selected index (or value) * the other elements can be couples (key, value) or only values", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\qt_editor\\_formlayout.py", + "ast_data": "FunctionDef name:fedit arg:data arg:title arg:comment arg:icon arg:parent arg:apply arguments arg arg arg arg arg arg If Call Assign Call Assign Call If Compare If Call Call Assign Call" + }, + { + "library": "matplotlib", + "name": "set_under", + "source_code": "def set_under(self, color='k', alpha=None):\n self._rgba_under = to_rgba(color, alpha)\n if self._isinit:\n self._set_extremes()", + "docstring": "Set the color for low out-of-range values.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:set_under arg:self arg:color arg:alpha arguments arg arg arg Assign Call If Call" + }, + { + "library": "tensorflow", + "name": "_parse_inference_type", + "source_code": "def _parse_inference_type(value, flag):\n if value == 'FLOAT':\n return dtypes.float32\n if value == 'INT8':\n return dtypes.int8\n if value == 'UINT8' or value == 'QUANTIZED_UINT8':\n return dtypes.uint8\n raise ValueError('Unsupported value for `{}` flag. Expected FLOAT, INT8, UINT8, or QUANTIZED_UINT8 instead got {}.'.format(flag, value))", + "docstring": "Converts the inference type to the value of the constant. Args: value: str representing the inference type. flag: str representing the flag name. Returns: tf.dtype. Raises: ValueError: Unsupported value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_convert.py", + "ast_data": "FunctionDef name:_parse_inference_type arg:value arg:flag arguments arg arg If Compare Return return:yes If Compare Return return:yes If BoolOp Compare Compare Return return:yes Raise Call Call" + }, + { + "library": "scikit-learn", + "name": "predict_log_proba", + "source_code": "def predict_log_proba(self, X):\n return np.log(self.predict_proba(X))", + "docstring": "Predict logarithm of probability estimates. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Returns ------- Y_log_prob : array-like of shape (n_samples, n_classes) The predicted logarithm of the probabilities.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\multioutput.py", + "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Return return:yes Call Call" + }, + { + "library": "cherrypy", + "name": "tail", + "source_code": "def tail(self, environ, start_response):\n return self.response_class(environ, start_response, self.cpapp)", + "docstring": "WSGI application callable for the actual CherryPy application. You probably shouldn't call this; call self.__call__ instead, so that any WSGI middleware in self.pipeline can run first.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpwsgi.py", + "ast_data": "FunctionDef name:tail arg:self arg:environ arg:start_response arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, learning_rate=0.001, rho=0.95, epsilon=1e-08, use_locking=False, name='Adadelta'):\n super(AdadeltaOptimizer, self).__init__(use_locking, name)\n self._lr = learning_rate\n self._rho = rho\n self._epsilon = epsilon\n self._lr_t = None\n self._rho_t = None\n self._epsilon_t = None", + "docstring": "Construct a new Adadelta optimizer. 
Args: learning_rate: A or a floating point value. The learning rate. To match the exact form in the original paper use 1.0. rho: A or a floating point value. The decay rate. epsilon: A or a floating point value. A constant epsilon used to better conditioning the grad update. use_locking: If use locks for update operations. name: Optional name prefix for the operations created when applying gradients. Defaults to \"Adadelta\".", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\adadelta.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:learning_rate arg:rho arg:epsilon arg:use_locking arg:name arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "add_function", + "source_code": "def add_function(self, call_fn, name, match_layer_training_arg):\n fn = LayerCall(self, self._maybe_wrap_with_training_arg(call_fn, match_layer_training_arg), name, input_signature=self.fn_input_signature)\n self._functions[name] = fn.wrapped_call\n return fn", + "docstring": "Adds a layer call function to the collection. Args: call_fn: a python function name: Name of call function match_layer_training_arg: If True, removes the from the function arguments when calling . Returns: LayerCall (tf.function)", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py", + "ast_data": "FunctionDef name:add_function arg:self arg:call_fn arg:name arg:match_layer_training_arg arguments arg arg arg arg Assign Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "set_rng_state", + "source_code": "def set_rng_state(new_state: Tensor, device: Union[int, str, torch.device]='xpu') -> None:\n with torch._C._DisableFuncTorch():\n new_state_copy = new_state.clone(memory_format=torch.contiguous_format)\n if isinstance(device, str):\n device = torch.device(device)\n elif isinstance(device, int):\n device = torch.device('xpu', device)\n\n def cb():\n idx = device.index\n if idx is None:\n idx = current_device()\n default_generator = torch.xpu.default_generators[idx]\n default_generator.set_state(new_state_copy)\n _lazy_call(cb)", + "docstring": "Set the random number generator state of the specified GPU. Args: new_state (torch.ByteTensor): The desired state device (torch.device or int, optional): The device to set the RNG state. 
Default: ``, the current XPU device).", + "type": "function", + "file_path": "pytorch\\torch\\xpu\\random.py", + "ast_data": "FunctionDef name:set_rng_state arg:new_state arg:device arguments arg arg With Call Assign Call If Call Assign Call If Call Assign Call FunctionDef name:cb arguments Assign If Compare Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_set_resources_aborted", + "source_code": "def _set_resources_aborted(self, e):\n logging.info('[Worker %d] Clearing all resources.', self.worker_index)\n for weakref_resource in self._resource_remote_value_refs:\n resource = weakref_resource()\n if resource:\n resource._set_aborted(ClosureAbortedError(e))", + "docstring": "Set the resource ABORTED and add an error to it.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:_set_resources_aborted arg:self arg:e arguments arg arg Call For Assign Call If Call Call" + }, + { + "library": "django", + "name": "end_serialization", + "source_code": "def end_serialization(self):\n pass", + "docstring": "Called when serializing of the queryset ends.", + "type": "method", + "file_path": "django\\django\\core\\serializers\\base.py", + "ast_data": "FunctionDef name:end_serialization arg:self arguments arg" + }, + { + "library": "scikit-learn", + "name": "hyperparameters", + "source_code": "@property\ndef hyperparameters(self):\n r = []\n for hyperparameter in self.kernel.hyperparameters:\n r.append(Hyperparameter('kernel__' + hyperparameter.name, hyperparameter.value_type, hyperparameter.bounds, hyperparameter.n_elements))\n return r", + "docstring": "Returns a list of all hyperparameter.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:hyperparameters arg:self arguments arg Assign For Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "num_row_partitions", + "source_code": "@property\ndef num_row_partitions(self):\n return len(self._row_partitions)", + "docstring": "The number of row_partitions of the shape.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:num_row_partitions arg:self arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "MLP", + "source_code": "class MLP(Module):\n\n def __init__(self, num_dims: int, num_units: int=2, num_unit_layers: int=4, num_hidden: int=128) -> None:\n super().__init__()\n self._num_unit_layers = num_unit_layers\n layers = []\n for i in range(num_units):\n num_unit_inp_dims = num_dims if i == 0 else num_hidden + num_dims\n for j in range(num_unit_layers):\n num_layer_inp_dims = num_unit_inp_dims if j == 0 else num_hidden\n layer = nn.Linear(num_layer_inp_dims, num_hidden)\n layers.append(nn.Sequential(layer, nn.ReLU()))\n self._mlp = nn.ModuleList(layers)\n\n def forward(self, x: Tensor) -> Tensor:\n out = x\n x_skip = x\n for i, layer in enumerate(self._mlp):\n if i > 0 and i % self._num_unit_layers == 0:\n out = torch.cat((out, x_skip), dim=-1)\n out = layer(out)\n return out", + "docstring": "Class to represent a multi-layer perceptron. The MLP represents a deep NN of fully connected layers. The network is build of user defined sub-units, each with a user defined number of layers. Skip connections span between the sub-units. The model follows: Ben Mildenhall et el. 
(2020) at", + "type": "class", + "file_path": "kornia\\kornia\\nerf\\nerf_model.py", + "ast_data": "ClassDef name:MLP FunctionDef name:__init__ arg:self arg:num_dims arg:num_units arg:num_unit_layers arg:num_hidden arguments arg arg arg arg arg Call Call Assign Assign For Call Assign Compare For Call Assign Compare Assign Call Call Call Call Assign Call FunctionDef name:forward arg:self arg:x arguments arg arg Assign Assign For Call If BoolOp Compare Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X):\n if sparse.issparse(X):\n n_samples = X.shape[0]\n output = []\n for batch in gen_batches(n_samples, self.batch_size_, min_batch_size=self.n_components or 0):\n output.append(super().transform(X[batch].toarray()))\n return np.vstack(output)\n else:\n return super().transform(X)", + "docstring": "Apply dimensionality reduction to X. X is projected on the first principal components previously extracted from a training set, using minibatches of size batch_size if X is sparse. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data, where is the number of samples and is the number of features. Returns ------- X_new : ndarray of shape (n_samples, n_components) Projection of X in the first principal components. Examples -------- >>> import numpy as np >>> from sklearn.decomposition import IncrementalPCA >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], ... [1, 1], [2, 1], [3, 2]]) >>> ipca = IncrementalPCA(n_components=2, batch_size=3) >>> ipca.fit(X) IncrementalPCA(batch_size=3, n_components=2) >>> ipca.transform(X) # doctest: +SKIP", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_incremental_pca.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg If Call Assign Assign For Call BoolOp Call Call Call Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "LeakyReLU", + "source_code": "class LeakyReLU(torch.nn.LeakyReLU):\n\n def __init__(self, scale: float, zero_point: int, negative_slope: float=0.01, inplace: bool=False, device=None, dtype=None) -> None:\n factory_kwargs = {'device': device, 'dtype': dtype}\n super().__init__(negative_slope, inplace)\n self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))\n self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))\n\n def forward(self, input):\n return torch.ops.quantized.leaky_relu(input, self.negative_slope, self.inplace, self.scale, self.zero_point)\n\n def _get_name(self):\n return 'QuantizedLeakyReLU'\n\n @classmethod\n def from_float(cls, mod, use_precomputed_fake_quant=False):\n scale, zero_point = mod.activation_post_process.calculate_qparams()\n return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)\n\n @classmethod\n def from_reference(cls, mod, scale, zero_point):\n return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)", + "docstring": "This is the quantized equivalent of :class:. Args: scale: quantization scale of the output tensor zero_point: quantization zero point of the output tensor negative_slope: Controls the angle of the negative slope. 
Default: 1e-2", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\activation.py", + "ast_data": "ClassDef name:LeakyReLU FunctionDef name:__init__ arg:self arg:scale arg:zero_point arg:negative_slope arg:inplace arg:device arg:dtype arguments arg arg arg arg arg arg arg Assign Call Call Call Call Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Assign Call Return return:yes Call Call Call FunctionDef name:from_reference arg:cls arg:mod arg:scale arg:zero_point arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "_get_estimator", + "source_code": "def _get_estimator(self):\n if self.estimator is None:\n return DecisionTreeClassifier()\n return self.estimator", + "docstring": "Resolve which estimator to return (default is DecisionTreeClassifier)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py", + "ast_data": "FunctionDef name:_get_estimator arg:self arguments arg If Compare Return return:yes Call Return return:yes" + }, + { + "library": "kornia", + "name": "_check_disparity_tensor", + "source_code": "def _check_disparity_tensor(disparity_tensor: Tensor) -> None:\n if not isinstance(disparity_tensor, Tensor):\n raise StereoException(f\"Expected 'disparity_tensor' to be an instance of Tensor but got {type(disparity_tensor)}.\")\n if len(disparity_tensor.shape) != 4:\n raise StereoException(f\"Expected 'disparity_tensor' to have 4 dimensions. Got {disparity_tensor.shape}.\")\n if disparity_tensor.shape[-1] != 1:\n raise StereoException(f\"Expected dimension 1 of 'disparity_tensor' to be 1 for as single channeled disparity map.Got {disparity_tensor.shape}.\")\n if disparity_tensor.dtype not in (torch.float16, torch.float32, torch.float64):\n raise StereoException(f\"Expected 'disparity_tensor' to have dtype torch.float16, torch.float32 or torch.float64.Got {disparity_tensor.dtype}\")", + "docstring": "Ensure correct user provided correct disparity tensor. 
Args: disparity_tensor: The disparity tensor of shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py", + "ast_data": "FunctionDef name:_check_disparity_tensor arg:disparity_tensor arguments arg If Call Raise Call Call If Compare Call Raise Call If Compare Raise Call If Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "_possibly_broadcast_batch_shape", + "source_code": "def _possibly_broadcast_batch_shape(self, x):\n if self._batch_shape_arg is None:\n return x\n special_shape = self.batch_shape.concatenate([1, 1])\n bshape = array_ops.broadcast_static_shape(x.shape, special_shape)\n if special_shape.is_fully_defined():\n if bshape == x.shape:\n return x\n zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)\n return x + zeros\n special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0)\n zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)\n return x + zeros", + "docstring": "Return 'x', possibly after broadcasting the leading dimensions.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_zeros.py", + "ast_data": "FunctionDef name:_possibly_broadcast_batch_shape arg:self arg:x arguments arg arg If Compare Return return:yes Assign Call Assign Call If Call If Compare Return return:yes Assign Call Return return:yes Assign Call Call Assign Call Return return:yes" + }, + { + "library": "authlib", + "name": "save_token", + "source_code": "def save_token(self, token, request):\n raise NotImplementedError()", + "docstring": "Define function to save the generated token into database.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py", + "ast_data": "FunctionDef name:save_token arg:self arg:token arg:request arguments arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, strategy):\n self._num_workers = strategy._num_workers\n self._num_ps = strategy._num_ps\n self._transient_ps_failures_threshold = int(os.environ.get('TF_COORDINATOR_IGNORE_TRANSIENT_PS_FAILURES', 3))\n self._potential_ps_failures_lock = threading.Lock()\n self._potential_ps_failures_count = [0] * self._num_ps\n self._transient_timeouts_threshold = int(os.environ.get('TF_COORDINATOR_IGNORE_TRANSIENT_TIMEOUTS', self._num_workers // 10))\n self._transient_timeouts_lock = threading.Lock()\n self._transient_timeouts_count = 0\n self.closure_queue = _CoordinatedClosureQueue()\n if os.getenv('TF_PSS_ENABLE_COORDINATION_SERVICE'):\n self.failure_handler = CoordinationServicePreemptionHandler(context.get_server_def(), self)\n else:\n self.failure_handler = WorkerPreemptionHandler(context.get_server_def(), self)\n worker_device_strings = ['/job:worker/replica:0/task:%d' % i for i in range(self._num_workers)]\n self.workers = [Worker(i, w, self) for i, w in enumerate(worker_device_strings)]\n self.resource_cancellation_mgr = cancellation.CancellationManager()", + "docstring": "Initializes the cluster instance.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:strategy arguments arg arg Assign Assign Assign Call Call Assign Call Assign Assign Call Call Assign Call Assign Assign Call If Call Assign Call Call Assign Call Call Assign Call Assign Call Call Assign Call" + }, + { + "library": "tensorflow", + "name": "_is_integer_like_by_dtype", + "source_code": "def 
_is_integer_like_by_dtype(dt):\n if not _is_known_dtype(dt):\n raise TypeError('Unrecognized dtype: {}'.format(dt.name))\n return dt.is_integer or dt.base_dtype == dtypes.bool", + "docstring": "Helper returning True if dtype.is_integer or is .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py", + "ast_data": "FunctionDef name:_is_integer_like_by_dtype arg:dt arguments arg If Call Raise Call Call Return return:yes BoolOp Compare" + }, + { + "library": "pandas", + "name": "_encode_strings", + "source_code": "def _encode_strings(self) -> None:\n convert_dates = self._convert_dates\n convert_strl = getattr(self, '_convert_strl', [])\n for i, col in enumerate(self.data):\n if i in convert_dates or col in convert_strl:\n continue\n column = self.data[col]\n dtype = column.dtype\n if dtype.type is np.object_:\n inferred_dtype = infer_dtype(column, skipna=True)\n if not (inferred_dtype == 'string' or len(column) == 0):\n col = column.name\n raise ValueError(f'Column `{col}` cannot be exported.\\n\\nOnly string-like object arrays\\ncontaining all strings or a mix of strings and None can be exported.\\nObject arrays containing only null values are prohibited. Other object\\ntypes cannot be exported and must first be converted to one of the\\nsupported types.')\n encoded = self.data[col].str.encode(self._encoding)\n if max_len_string_array(ensure_object(encoded._values)) <= self._max_string_length:\n self.data[col] = encoded", + "docstring": "Encode strings in dta-specific encoding Do not encode columns marked for date conversion or for strL conversion. The strL converter independently handles conversion and also accepts empty string arrays.", + "type": "method", + "file_path": "pandas\\pandas\\io\\stata.py", + "ast_data": "FunctionDef name:_encode_strings arg:self arguments arg Assign Assign Call For Call If BoolOp Compare Compare Assign Assign If Compare Assign Call If BoolOp Compare Compare Call Assign Raise Call Assign Call If Compare Call Call Assign" + }, + { + "library": "pytorch", + "name": "state", + "source_code": "@property\ndef state(self) -> _RendezvousState:\n return self._state", + "docstring": "See base class.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py", + "ast_data": "FunctionDef name:state arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "get_transformation_matrix", + "source_code": "def get_transformation_matrix(self, input: Tensor, params: Optional[List[ParamItem]]=None, recompute: bool=False, extra_args: Optional[Dict[str, Any]]=None) -> Optional[Tensor]:\n if params is None:\n raise NotImplementedError('requires params to be provided.')\n named_modules: Iterator[Tuple[str, Module]] = self.get_forward_sequence(params)\n res_mat: Optional[Tensor] = None\n for (_, module), param in zip(named_modules, params if params is not None else []):\n module = cast(PolicySequential, module)\n mat = module.get_transformation_matrix(input, params=cast(Optional[List[ParamItem]], param.data), recompute=recompute, extra_args=extra_args)\n res_mat = mat if res_mat is None else mat @ res_mat\n return res_mat", + "docstring": "Compute the transformation matrix according to the provided parameters. Args: input: the input tensor. params: params for the sequence. recompute: if to recompute the transformation matrix according to the params. default: False. 
extra_args: Optional dictionary of extra arguments with specific options for different input types.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\auto\\base.py", + "ast_data": "FunctionDef name:get_transformation_matrix arg:self arg:input arg:params arg:recompute arg:extra_args arguments arg arg arg arg arg If Compare Raise Call Call For Call Compare Assign Call Assign Call Call Assign Compare Return return:yes" + }, + { + "library": "numpy", + "name": "legpow", + "source_code": "def legpow(c, pow, maxpower=16):\n return pu._pow(legmul, c, pow, maxpower)", + "docstring": "Raise a Legendre series to a power. Returns the Legendre series raised to the power . The argument is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series `` Parameters ---------- c : array_like 1-D array of Legendre series coefficients ordered from low to high. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Legendre series of power. See Also -------- legadd, legsub, legmulx, legmul, legdiv", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\legendre.py", + "ast_data": "FunctionDef name:legpow arg:c arg:pow arg:maxpower arguments arg arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "find_position", + "source_code": "def find_position(string, index, last_index, last_pos):\n lines = string.count('\\n', last_index, index)\n if lines > 0:\n column = index - string.rfind('\\n', last_index, index)\n else:\n column = last_pos[1] + (index - last_index)\n return (last_pos[0] + lines, column)", + "docstring": "Given a string and index, return (line, column)", + "type": "function", + "file_path": "numpy\\numpy\\_build_utils\\tempita\\_tempita.py", + "ast_data": "FunctionDef name:find_position arg:string arg:index arg:last_index arg:last_pos arguments arg arg arg arg Assign Call If Compare Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "imread", + "source_code": "def imread(fname, format=None):\n from urllib import parse\n if format is None:\n if isinstance(fname, str):\n parsed = parse.urlparse(fname)\n if len(parsed.scheme) > 1:\n ext = 'png'\n else:\n ext = Path(fname).suffix.lower()[1:]\n elif hasattr(fname, 'geturl'):\n ext = 'png'\n elif hasattr(fname, 'name'):\n ext = Path(fname.name).suffix.lower()[1:]\n else:\n ext = 'png'\n else:\n ext = format\n img_open = PIL.PngImagePlugin.PngImageFile if ext == 'png' else PIL.Image.open\n if isinstance(fname, str) and len(parse.urlparse(fname).scheme) > 1:\n raise ValueError('Please open the URL for reading and pass the result to Pillow, e.g. with ``np.array(PIL.Image.open(urllib.request.urlopen(url)))``.')\n with img_open(fname) as image:\n return _pil_png_to_float_array(image) if isinstance(image, PIL.PngImagePlugin.PngImageFile) else pil_to_array(image)", + "docstring": "Read an image from a file into an array. .. note:: This function exists for historical reasons. It is recommended to use instead for loading images. Parameters ---------- fname : str or file-like The image file to read: a filename, a URL or a file-like object opened in read-binary mode. Passing a URL is deprecated. Please open the URL for reading and pass the result to Pillow, e.g. with `PIL.Image.opennumpy.array` The image data. The returned array has shape - (M, N) for grayscale images. - (M, N, 3) for RGB images. 
- (M, N, 4) for RGBA images. PNG images are returned as float arrays (0-1). All other formats are returned as int arrays, with a bit depth determined by the file's contents.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:imread arg:fname arg:format arguments arg arg If Compare If Call Assign Call If Compare Call Assign Assign Call Call If Call Assign If Call Assign Call Call Assign Assign Assign Compare If BoolOp Call Compare Call Call Raise Call With Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "dfs", + "source_code": "def dfs(module: nn.Module) -> None:\n if not _is_composable_with_fsdp(module):\n return\n elif module not in root_modules_set and _get_module_fsdp_state(module) is not None:\n return\n visited_modules.add(module)\n for submodule in module.children():\n if submodule not in visited_modules:\n dfs(submodule)\n modules.append(module)", + "docstring": "Runs a DFS to collect managed modules, not recursing into modules with a non-composable API or `` already applied.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_init.py", + "ast_data": "FunctionDef name:dfs arg:module arguments arg If Call Return return:no If BoolOp Compare Compare Call Return return:no Call For Call If Compare Call Call" + }, + { + "library": "scipy", + "name": "_with_data", + "source_code": "def _with_data(self, data, copy=True):\n if copy:\n return self._dia_container((data, self.offsets.copy()), shape=self.shape)\n else:\n return self._dia_container((data, self.offsets), shape=self.shape)", + "docstring": "Returns a matrix with the same sparsity structure as self, but with different data. By default the structure arrays are copied.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_dia.py", + "ast_data": "FunctionDef name:_with_data arg:self arg:data arg:copy arguments arg arg arg If Return return:yes Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "wait", + "source_code": "def wait(self, stream=None) -> None:\n if stream is None:\n stream = torch.cuda.current_stream()\n super().wait(stream)", + "docstring": "Make all future work submitted to the given stream wait for this event. Use `CUDA Event documentation`_ for more info.", + "type": "method", + "file_path": "pytorch\\torch\\cuda\\streams.py", + "ast_data": "FunctionDef name:wait arg:self arg:stream arguments arg arg If Compare Assign Call Call Call" + }, + { + "library": "kornia", + "name": "CornerHarris", + "source_code": "class CornerHarris(Module):\n k: Tensor\n\n def __init__(self, k: Union[float, Tensor], grads_mode: str='sobel') -> None:\n super().__init__()\n if isinstance(k, float):\n self.register_buffer('k', tensor(k))\n else:\n self.register_buffer('k', k)\n self.grads_mode: str = grads_mode\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(k={self.k}, grads_mode={self.grads_mode})'\n\n def forward(self, input: Tensor, sigmas: Optional[Tensor]=None) -> Tensor:\n return harris_response(input, self.k, self.grads_mode, sigmas)", + "docstring": "Module that calculates Harris corners. .. 
image:: _static/img/harris_response.png See :func: for details.", + "type": "class", + "file_path": "kornia\\kornia\\feature\\responses.py", + "ast_data": "ClassDef name:CornerHarris FunctionDef name:__init__ arg:self arg:k arg:grads_mode arguments arg arg arg Call Call If Call Call Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arg:sigmas arguments arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "cache_key", + "source_code": "def cache_key(self, template_name, skip=None):\n skip_prefix = ''\n if skip:\n matching = [origin.name for origin in skip if origin.template_name == template_name]\n if matching:\n skip_prefix = self.generate_hash(matching)\n return '-'.join((s for s in (str(template_name), skip_prefix) if s))", + "docstring": "Generate a cache key for the template name and skip. If skip is provided, only origins that match template_name are included in the cache key. This ensures each template is only parsed and cached once if contained in different extend chains like: x -> a -> a y -> a -> a z -> a -> a", + "type": "method", + "file_path": "django\\django\\template\\loaders\\cached.py", + "ast_data": "FunctionDef name:cache_key arg:self arg:template_name arg:skip arguments arg arg arg Assign If Assign Compare If Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "add_function_def", + "source_code": "def add_function_def(self, fdef):\n self.ensure_initialized()\n if is_oss:\n fdef_string = fdef.SerializeToString()\n pywrap_tfe.TFE_ContextAddFunctionDef(self._handle, fdef_string, len(fdef_string))\n else:\n pywrap_tfe.TFE_ContextAddFunctionDefNoSerialization(self._handle, fdef)", + "docstring": "Add a function definition to the context. Once added, the function (identified by its name) can be executed like any other operation. Args: fdef: A FunctionDef protocol buffer message.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:add_function_def arg:self arg:fdef arguments arg arg Call If Assign Call Call Call Call" + }, + { + "library": "pytorch", + "name": "SymbolBuffer", + "source_code": "@dataclasses.dataclass\nclass SymbolBuffer(CodegenSymbol):\n symbol: sympy.Symbol\n\n def get_name(self) -> str:\n return str(self.symbol)\n\n def get_example(self) -> Union[torch.Tensor, sympy.Symbol]:\n return self.symbol", + "docstring": "Represents a sympy.Symbol graph input.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py", + "ast_data": "ClassDef name:SymbolBuffer FunctionDef name:get_name arg:self arguments arg Return return:yes Call FunctionDef name:get_example arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "any", + "source_code": "def any(self, *, skipna: bool=True, **kwargs) -> bool | NAType:\n return self._reduce('any', skipna=skipna, **kwargs)", + "docstring": "Return whether any element is truthy. Returns False unless there is at least one element that is truthy. By default, NAs are skipped. 
If `Kleene logic skipnaskipnapandas.NA` is True or False influences the result): >>> pd.array([True, False, pd.NA], dtype=\"boolean[pyarrow]\").any(skipna=False) True >>> pd.array([1, 0, pd.NA], dtype=\"boolean[pyarrow]\").any(skipna=False) True >>> pd.array([False, False, pd.NA], dtype=\"boolean[pyarrow]\").any(skipna=False) >>> pd.array([0, 0, pd.NA], dtype=\"boolean[pyarrow]\").any(skipna=False)", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py", + "ast_data": "FunctionDef name:any arg:self arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "from_features", + "source_code": "@classmethod\ndef from_features(cls, features, types):\n params = cls()\n if features:\n for key in sorted(features.keys()):\n feature = features[key]\n if not isinstance(feature, tuple(types)):\n raise ValueError(f\"Unsupported {type(feature).__name__} {feature} for key '{key}'\")\n params._add_feature(key, feature)\n params._validate()\n return params", + "docstring": "Builds _ParseOpParams for a given set of features and allowed types. Args: features: A mapping feature keys to objects of a type in . types: Type of features to allow, among , , , and . Returns: A containing the raw parameters for . Raises: ValueError: if contains an item not in , or an invalid feature. ValueError: if sparse and dense key sets intersect. ValueError: if input lengths do not match up.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py", + "ast_data": "FunctionDef name:from_features arg:cls arg:features arg:types arguments arg arg arg Assign Call If For Call Call Assign If Call Call Raise Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "to_dict", + "source_code": "def to_dict(self) -> dict[str, Any]:\n dtype_config_dict: dict[str, Any] = {}\n if self.input_dtype is not None:\n dtype_config_dict[INPUT_DTYPE_DICT_KEY] = self.input_dtype_with_constraints\n if self.output_dtype is not None:\n dtype_config_dict[OUTPUT_DTYPE_DICT_KEY] = self.output_dtype_with_constraints\n if self.weight_dtype is not None:\n dtype_config_dict[WEIGHT_DTYPE_DICT_KEY] = self.weight_dtype_with_constraints\n if self.bias_dtype is not None:\n dtype_config_dict[BIAS_DTYPE_DICT_KEY] = self.bias_dtype\n if self.is_dynamic is not None:\n dtype_config_dict[IS_DYNAMIC_DICT_KEY] = self.is_dynamic\n return dtype_config_dict", + "docstring": "Convert this `~torch.ao.quantization.backend_config.DTypeConfig.from_dict`.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py", + "ast_data": "FunctionDef name:to_dict arg:self arguments arg If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "opset_version", + "source_code": "@property\ndef opset_version(self) -> int:\n return self._opset_version", + "docstring": "The ONNX opset version the exporter should target.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py", + "ast_data": "FunctionDef name:opset_version arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "compute_subpixel_step", + "source_code": "def compute_subpixel_step(self) -> Tensor:\n delta_d = 0.01\n xy_m1 = self._compute_projection(self.width / 2, self.height / 2, 1.0 - delta_d)\n xy_p1 = self._compute_projection(self.width / 2, self.height / 2, 1.0 + delta_d)\n dx = torch.norm(xy_p1 - xy_m1, 2, 
dim=-1) / 2.0\n dxdd = dx / delta_d\n return torch.min(0.5 / dxdd)", + "docstring": "Compute the inverse depth step for sub pixel accurate sampling of the depth cost volume, per camera. Szeliski, Richard, and Daniel Scharstein. \"Symmetric sub-pixel stereo matching.\" European Conference on Computer Vision. Springer Berlin Heidelberg, 2002.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\depth.py", + "ast_data": "FunctionDef name:compute_subpixel_step arg:self arguments arg Assign Assign Call Assign Call Assign Call Assign Return return:yes Call" + }, + { + "library": "pandas", + "name": "_math_mode_with_parentheses", + "source_code": "def _math_mode_with_parentheses(s: str) -> str:\n s = s.replace('\\\\(', 'LEFT§=§6yzLEFT').replace('\\\\)', 'RIGHTab5§=§RIGHT')\n res = []\n for item in re.split('LEFT§=§6yz|ab5§=§RIGHT', s):\n if item.startswith('LEFT') and item.endswith('RIGHT'):\n res.append(item.replace('LEFT', '\\\\(').replace('RIGHT', '\\\\)'))\n elif 'LEFT' in item and 'RIGHT' in item:\n res.append(_escape_latex(item).replace('LEFT', '\\\\(').replace('RIGHT', '\\\\)'))\n else:\n res.append(_escape_latex(item).replace('LEFT', '\\\\textbackslash (').replace('RIGHT', '\\\\textbackslash )'))\n return ''.join(res)", + "docstring": "All characters in LaTeX math mode are preserved. The substrings in LaTeX math mode, which start with the character ``, are preserved without escaping. Otherwise regular LaTeX escaping applies. Parameters ---------- s : str Input to be escaped Return ------ str : Escaped string", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\style_render.py", + "ast_data": "FunctionDef name:_math_mode_with_parentheses arg:s arguments arg Assign Call Call Assign For Call If BoolOp Call Call Call Call Call If BoolOp Compare Compare Call Call Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "create_python_return_type_bindings", + "source_code": "def create_python_return_type_bindings(fm: FileManager, pairs: Sequence[PythonSignatureNativeFunctionPair], pred: Callable[[NativeFunction], bool], filename: str) -> None:\n py_return_types_definition: list[str] = []\n py_return_types_registrations: list[str] = []\n grouped = group_filter_overloads(pairs, pred)\n for name in sorted(grouped.keys(), key=str):\n overloads = grouped[name]\n definitions, registrations = generate_return_type_definition_and_registrations(overloads)\n py_return_types_definition.append('' if not definitions else '\\n'.join(definitions))\n py_return_types_registrations.append('' if not registrations else '\\n'.join(registrations))\n fm.write_with_template(filename, filename, lambda: {'generated_comment': '@' + f'generated from {fm.template_dir_for_comments()}/{filename}', 'py_return_types': py_return_types_definition, 'py_return_types_registrations': py_return_types_registrations})", + "docstring": "Generate function to initialize and return named tuple for native functions which returns named tuple and registration invocations in .", + "type": "function", + "file_path": "pytorch\\tools\\autograd\\gen_python_functions.py", + "ast_data": "FunctionDef name:create_python_return_type_bindings arg:fm arg:pairs arg:pred arg:filename arguments arg arg arg arg Assign Call For Call Call Assign Assign Call Call Call Call Call Call arguments Call" + }, + { + "library": "sphinx", + "name": "Target", + "source_code": "class Target(SphinxDirective):\n indextemplate = ''\n has_content = False\n required_arguments = 1\n optional_arguments = 0\n 
final_argument_whitespace = True\n option_spec: ClassVar[OptionSpec] = {}\n\n def run(self) -> list[Node]:\n fullname = ws_re.sub(' ', self.arguments[0].strip())\n node_id = make_id(self.env, self.state.document, self.name, fullname)\n node = nodes.target('', '', ids=[node_id])\n self.set_source_info(node)\n self.state.document.note_explicit_target(node)\n ret: list[Node] = [node]\n if self.indextemplate:\n indexentry = self.indextemplate % (fullname,)\n indextype = 'single'\n colon = indexentry.find(':')\n if colon != -1:\n indextype = indexentry[:colon].strip()\n indexentry = indexentry[colon + 1:].strip()\n inode = addnodes.index(entries=[(indextype, indexentry, node_id, '', None)])\n ret.insert(0, inode)\n name = self.name\n if ':' in self.name:\n _, name = self.name.split(':', 1)\n std = self.env.domains.standard_domain\n std.note_object(name, fullname, node_id, location=node)\n return ret", + "docstring": "Generic target for user-defined cross-reference types.", + "type": "class", + "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py", + "ast_data": "ClassDef name:Target Assign Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Assign Call Call Assign Call Assign Call Call Call If Assign Assign Assign Call If Compare Assign Call Assign Call Assign Call Call Assign If Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "trapz", + "source_code": "@set_module('numpy')\ndef trapz(y, x=None, dx=1.0, axis=-1):\n warnings.warn('`trapz` is deprecated. Use `trapezoid` instead, or one of the numerical integration functions in `scipy.integrate`.', DeprecationWarning, stacklevel=2)\n return trapezoid(y, x=x, dx=dx, axis=axis)", + "docstring": "is deprecated in NumPy 2.0. Please use instead, or one of the numerical integration functions in .", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_function_base_impl.py", + "ast_data": "FunctionDef name:trapz arg:y arg:x arg:dx arg:axis arguments arg arg arg arg Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_get_gradient_op_type", + "source_code": "def _get_gradient_op_type(node_def):\n if '_gradient_op_type' in node_def.attr and node_def.op not in ['StatefulPartitionedCall', 'PartitionedCall']:\n return node_def.attr['_gradient_op_type'].s\n return None", + "docstring": "Returns the custom gradient op type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py", + "ast_data": "FunctionDef name:_get_gradient_op_type arg:node_def arguments arg If BoolOp Compare Compare Return return:yes Return return:no" + }, + { + "library": "scipy", + "name": "__init__", + "source_code": "def __init__(self, x, field=None, nn=None, index=None, field_args=(), g_cons=None, g_cons_args=()):\n super().__init__(x, nn=nn, index=index)\n self.check_min = True\n self.check_max = True", + "docstring": "Parameters ---------- x : tuple, vector of vertex coordinates field : callable, optional a scalar field f: R^n --> R associated with the geometry nn : list, optional list of nearest neighbours index : int, optional index of the vertex field_args : tuple, optional additional arguments to be passed to field g_cons : callable, optional constraints on the vertex g_cons_args : tuple, optional additional arguments to be passed to g_cons", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:x arg:field arg:nn arg:index arg:field_args 
arg:g_cons arg:g_cons_args arguments arg arg arg arg arg arg arg arg Call Call Assign Assign" + }, + { + "library": "pytorch", + "name": "DictViewVariable", + "source_code": "class DictViewVariable(VariableTracker):\n kv: Optional[str] = None\n\n def __init__(self, dv_dict: ConstDictVariable, **kwargs) -> None:\n super().__init__(**kwargs)\n assert self.kv in ('keys', 'values', 'items')\n assert isinstance(dv_dict, ConstDictVariable)\n self.dv_dict = dv_dict\n\n @property\n def view_items(self):\n return getattr(self.dv_dict.items, self.kv)()\n\n @property\n def view_items_vt(self):\n raise NotImplementedError\n\n def unpack_var_sequence(self, tx):\n return self.view_items_vt\n\n def reconstruct(self, codegen: 'PyCodegen'):\n codegen(self.dv_dict)\n codegen.load_method(self.kv)\n codegen.call_method(0)\n\n def call_method(self, tx, name, args: list['VariableTracker'], kwargs: dict[str, 'VariableTracker']) -> 'VariableTracker':\n if name == '__len__':\n return self.dv_dict.call_method(tx, name, args, kwargs)\n return super().call_method(tx, name, args, kwargs)", + "docstring": "Models _PyDictViewObject This is an \"abstract\" class. Subclasses will override kv and the items method", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\variables\\dicts.py", + "ast_data": "ClassDef name:DictViewVariable FunctionDef name:__init__ arg:self arg:dv_dict arguments arg arg arg Call Call Compare Call Assign FunctionDef name:view_items arg:self arguments arg Return return:yes Call Call FunctionDef name:view_items_vt arg:self arguments arg Raise FunctionDef name:unpack_var_sequence arg:self arg:tx arguments arg arg Return return:yes FunctionDef name:reconstruct arg:self arg:codegen arguments arg arg Call Call Call FunctionDef name:call_method arg:self arg:tx arg:name arg:args arg:kwargs arguments arg arg arg arg arg If Compare Return return:yes Call Return return:yes Call Call" + }, + { + "library": "cherrypy", + "name": "_merged_args", + "source_code": "def _merged_args(self, d=None):\n if d:\n conf = d.copy()\n else:\n conf = {}\n tm = cherrypy.serving.request.toolmaps[self.namespace]\n if self._name in tm:\n conf.update(tm[self._name])\n if 'on' in conf:\n del conf['on']\n return conf", + "docstring": "Return a dict of configuration entries for this Tool.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cptools.py", + "ast_data": "FunctionDef name:_merged_args arg:self arg:d arguments arg arg If Assign Call Assign Assign If Compare Call If Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "merge_options", + "source_code": "def merge_options(*options_list):\n if len(options_list) < 1:\n raise ValueError('At least one options should be provided')\n result_type = type(options_list[0])\n for options in options_list:\n if not isinstance(options, result_type):\n raise TypeError('Could not merge incompatible options of type {} and {}.'.format(type(options), result_type))\n if not isinstance(options_list[0], OptionsBase):\n raise TypeError('All options to be merged should inherit from `OptionsBase` but found option of type {} which does not.'.format(type(options_list[0])))\n default_options = result_type()\n result = result_type()\n for options in options_list:\n for name in options._options:\n this = getattr(result, name)\n that = getattr(options, name)\n default = getattr(default_options, name)\n if that == default:\n continue\n elif this == default:\n setattr(result, name, that)\n elif isinstance(this, OptionsBase):\n setattr(result, name, merge_options(this, 
that))\n elif name == 'framework_type':\n setattr(result, name, this + that)\n elif this != that:\n logging.warning('Changing the value of option %s from %r to %r.', name, this, that)\n setattr(result, name, that)\n return result", + "docstring": "Merges the given options, returning the result as a new options object. The input arguments are expected to have a matching type that derives from (and thus each represent a set of options). The method outputs an object of the same type created by merging the sets of options represented by the input arguments. If an option is set to different values by different options objects, the result will match the setting of the options object that appears in the input list last. If an option is an instance of itself, then this method is applied recursively to the set of options represented by this option. Args: *options_list: options to merge Raises: TypeError: if the input arguments are incompatible or not derived from Returns: A new options object which is the result of merging the given options.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\util\\options.py", + "ast_data": "FunctionDef name:merge_options arguments arg If Compare Call Raise Call Assign Call For If Call Raise Call Call Call If Call Raise Call Call Call Assign Call Assign Call For For Assign Call Assign Call Assign Call If Compare If Compare Call If Call Call Call If Compare Call If Compare Call Call Return return:yes" + }, + { + "library": "django", + "name": "widthratio", + "source_code": "@register.tag\ndef widthratio(parser, token):\n bits = token.split_contents()\n if len(bits) == 4:\n tag, this_value_expr, max_value_expr, max_width = bits\n asvar = None\n elif len(bits) == 6:\n tag, this_value_expr, max_value_expr, max_width, as_, asvar = bits\n if as_ != 'as':\n raise TemplateSyntaxError(\"Invalid syntax in widthratio tag. Expecting 'as' keyword\")\n else:\n raise TemplateSyntaxError('widthratio takes at least three arguments')\n return WidthRatioNode(parser.compile_filter(this_value_expr), parser.compile_filter(max_value_expr), parser.compile_filter(max_width), asvar=asvar)", + "docstring": "For creating bar charts and such. Calculate the ratio of a given value to a maximum value, and then apply that ratio to a constant. For example:: If `` is 100, the image in the above example will be 88 pixels wide (because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88). In some cases you might want to capture the result of widthratio in a variable. It can be useful for instance in a blocktranslate like this:: {% widthratio this_value max_value max_width as width %} {% blocktranslate %}The width is: {{ width }}{% endblocktranslate %}", + "type": "function", + "file_path": "django\\django\\template\\defaulttags.py", + "ast_data": "FunctionDef name:widthratio arg:parser arg:token arguments arg arg Assign Call If Compare Call Assign Assign If Compare Call Assign If Compare Raise Call Raise Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "mod", + "source_code": "def mod(x, y, name=None):\n with ops.name_scope(name, 'mod', [x, y]) as name:\n return gen_math_ops.floor_mod(x, y, name=name)", + "docstring": "Returns element-wise remainder of division. This follows Python semantics in that the result here is consistent with a flooring divide. E.g. , regardless of the signs of x and y. *NOTE*: supports broadcasting. More about broadcasting [here]( Args: x: A . 
Must be one of the following types: , , , , , , , , , , , . y: A . Must have the same type as . name: A name for the operation (optional). Returns: A . Has the same type as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:mod arg:x arg:y arg:name arguments arg arg arg With Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, *, h_pad=None, w_pad=None, hspace=None, wspace=None, rect=(0, 0, 1, 1), compress=False, **kwargs):\n super().__init__(**kwargs)\n self.set(w_pad=mpl.rcParams['figure.constrained_layout.w_pad'], h_pad=mpl.rcParams['figure.constrained_layout.h_pad'], wspace=mpl.rcParams['figure.constrained_layout.wspace'], hspace=mpl.rcParams['figure.constrained_layout.hspace'], rect=(0, 0, 1, 1))\n self.set(w_pad=w_pad, h_pad=h_pad, wspace=wspace, hspace=hspace, rect=rect)\n self._compress = compress", + "docstring": "Initialize `figure.constrained_layout.h_padfigure.constrained_layout.w_padfigure.constrained_layout.hspacefigure.constrained_layout.wspacecompressed_layout`.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg arg Call Call Call Call Assign" + }, + { + "library": "pytorch", + "name": "RendezvousTimeoutError", + "source_code": "class RendezvousTimeoutError(RendezvousError):\n pass", + "docstring": "Raised when a rendezvous did not complete on time.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py", + "ast_data": "ClassDef name:RendezvousTimeoutError" + }, + { + "library": "tensorflow", + "name": "_conv_2d_backprop_filter_flops", + "source_code": "@ops.RegisterStatistics('Conv2DBackpropFilter', 'flops')\ndef _conv_2d_backprop_filter_flops(graph, node):\n _verify_conv_data_format(node)\n image_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n image_shape.assert_is_fully_defined()\n kernel_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n kernel_shape.assert_is_fully_defined()\n strides_shape = list(node.attr['strides'].list.i)\n strides_product = strides_shape[1] * strides_shape[2]\n return ops.OpStats('flops', 2 * image_shape.num_elements() * kernel_shape.num_elements() / (image_shape.dims[-1].value * strides_product))", + "docstring": "Compute flops for Conv2DBackpropFilter operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py", + "ast_data": "FunctionDef name:_conv_2d_backprop_filter_flops arg:graph arg:node arguments arg arg Call Assign Call Call Assign Call Call Assign Call Assign Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_make_fake_dataset_fn", + "source_code": "def _make_fake_dataset_fn(initial_delay_us, remainder_delay_us):\n\n def fake_dataset_fn(unused):\n del unused\n\n def make_dataset(time_us, num_elements):\n dataset = dataset_ops.Dataset.range(num_elements)\n if time_us > 0:\n dataset = dataset.apply(testing.sleep(time_us))\n return dataset\n if not initial_delay_us:\n return make_dataset(remainder_delay_us, 100)\n return make_dataset(initial_delay_us, 0).concatenate(make_dataset(remainder_delay_us, 100))\n return fake_dataset_fn", + "docstring": "Returns a dataset that emulates a remote storage data source. 
Returns a dataset factory which creates a dataset with 100 elements that emulates the performance characteristic of a file-based dataset stored in a remote storage. In particular, the first element will take an order of magnitude longer to produce than the remaining elements (100ms vs. 1ms). Args: initial_delay_us: How long to wait before producing the first element. remainder_delay_us: How long to wait before producing subsequent elements.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\interleave_benchmark.py", + "ast_data": "FunctionDef name:_make_fake_dataset_fn arg:initial_delay_us arg:remainder_delay_us arguments arg arg FunctionDef name:fake_dataset_fn arg:unused arguments arg FunctionDef name:make_dataset arg:time_us arg:num_elements arguments arg arg Assign Call If Compare Assign Call Call Return return:yes If Return return:yes Call Return return:yes Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "parameters_to_vector", + "source_code": "def parameters_to_vector(parameters: Iterable[torch.Tensor]) -> torch.Tensor:\n param_device = None\n vec = []\n for param in parameters:\n param_device = _check_param_device(param, param_device)\n vec.append(param.view(-1))\n return torch.cat(vec)", + "docstring": "Flatten an iterable of parameters into a single vector. Args: parameters (Iterable[Tensor]): an iterable of Tensors that are the parameters of a model. Returns: The parameters represented by a single vector", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\convert_parameters.py", + "ast_data": "FunctionDef name:parameters_to_vector arg:parameters arguments arg Assign Assign For Assign Call Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "autoscale", + "source_code": "def autoscale(self, enable=True, axis='both', tight=None):\n if enable is None:\n scalex = True\n scaley = True\n else:\n if axis in ['x', 'both']:\n self.set_autoscalex_on(bool(enable))\n scalex = self.get_autoscalex_on()\n else:\n scalex = False\n if axis in ['y', 'both']:\n self.set_autoscaley_on(bool(enable))\n scaley = self.get_autoscaley_on()\n else:\n scaley = False\n if tight and scalex:\n self._xmargin = 0\n if tight and scaley:\n self._ymargin = 0\n if scalex:\n self._request_autoscale_view('x', tight=tight)\n if scaley:\n self._request_autoscale_view('y', tight=tight)", + "docstring": "Autoscale the axis view to the data (toggle). Convenience method for simple axis view autoscaling. It turns autoscaling on or off, and then, if autoscaling for either axis is on, it performs the autoscaling on the specified axis or Axes. Parameters ---------- enable : bool or None, default: True True turns autoscaling on, False turns it off. None leaves the autoscaling state unchanged. axis : {'both', 'x', 'y'}, default: 'both' The axis on which to operate. (For 3D Axes, *axis* can also be set to 'z', and 'both' refers to all three Axes.) tight : bool or None, default: None If True, first set the margins to zero. 
Then, this argument is forwarded to (regardless of its value); see the description of its behavior there.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:autoscale arg:self arg:enable arg:axis arg:tight arguments arg arg arg arg If Compare Assign Assign If Compare Call Call Assign Call Assign If Compare Call Call Assign Call Assign If BoolOp Assign If BoolOp Assign If Call If Call" + }, + { + "library": "pytorch", + "name": "check_training_mode", + "source_code": "def check_training_mode(op_train_mode: int, op_name: str) -> None:\n if GLOBALS.training_mode == _C_onnx.TrainingMode.PRESERVE:\n return\n if op_train_mode:\n op_mode_enum = _C_onnx.TrainingMode.TRAINING\n else:\n op_mode_enum = _C_onnx.TrainingMode.EVAL\n if op_mode_enum == GLOBALS.training_mode:\n return\n op_mode_text = f'train={bool(op_train_mode)}'\n warnings.warn(f\"ONNX export mode is set to {GLOBALS.training_mode}, but operator '{op_name}' is set to {op_mode_text}. Exporting with {op_mode_text}.\")", + "docstring": "Warns the user if the model's training mode and the export mode do not agree.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py", + "ast_data": "FunctionDef name:check_training_mode arg:op_train_mode arg:op_name arguments arg arg If Compare Return return:no If Assign Assign If Compare Return return:no Assign Call Call" + }, + { + "library": "tensorflow", + "name": "get_next", + "source_code": "@abc.abstractmethod\ndef get_next(self):\n raise NotImplementedError('Iterator.get_next()')", + "docstring": "Returns the next element. >>> dataset = tf.data.Dataset.from_tensors(42) >>> iterator = iter(dataset) >>> print(iterator.get_next()) tf.Tensor(42, shape=(), dtype=int32) Returns: A (nested) structure of values matching . Raises: : If the end of the iterator has been reached.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py", + "ast_data": "FunctionDef name:get_next arg:self arguments arg Raise Call" + }, + { + "library": "pytorch", + "name": "increment", + "source_code": "def increment(self, event_name: str, key: str, value: int):\n if event_name not in self.get_stack():\n raise RuntimeError(f\"Event {repr(event_name)} not in {self.get_stack()}. Cannot add metadata to events that aren't in progress. Please make sure the event has started and hasn't ended.\")\n event_data = self.get_event_data()\n if event_name not in event_data:\n event_data[event_name] = {}\n if key not in event_data[event_name]:\n event_data[event_name][key] = 0\n event_data[event_name][key] += value", + "docstring": "Increment an integer event data field by the given amount", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:increment arg:self arg:event_name arg:key arg:value arguments arg arg arg arg If Compare Call Raise Call Call Call Assign Call If Compare Assign If Compare Assign" + }, + { + "library": "tensorflow", + "name": "_eval", + "source_code": "def _eval(self, tensor):\n return tensor.numpy()", + "docstring": "Returns the value in the tensor. 
Must be implemented in sub-classes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", + "ast_data": "FunctionDef name:_eval arg:self arg:tensor arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "bisect_keep_left", + "source_code": "def bisect_keep_left(a, fn):\n lo = 0\n hi = len(a)\n while lo < hi:\n mid = (lo + hi) // 2\n if fn(a[:mid + 1]):\n hi = mid\n else:\n lo = mid + 1\n return lo", + "docstring": "Find the index of the first element from the start of the array that verifies the given condition. The function is applied from the start of the array to the pivot.", + "type": "function", + "file_path": "django\\django\\contrib\\messages\\storage\\cookie.py", + "ast_data": "FunctionDef name:bisect_keep_left arg:a arg:fn arguments arg arg Assign Assign Call While Compare Assign If Call Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "state_dict_hook", + "source_code": "def state_dict_hook(module, destination, prefix, local_metadata):\n for submodule_name, submodule in module.named_modules():\n for attr_name, attr in submodule.__dict__.items():\n if isinstance(attr, ShardedTensor):\n mod_prefix = prefix + submodule_name\n key = mod_prefix + ('.' if mod_prefix else '') + attr_name\n destination[key] = attr", + "docstring": "Hook to add ShardedTensor to Module's `torch.nn.Module._register_state_dict_hook`.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py", + "ast_data": "FunctionDef name:state_dict_hook arg:module arg:destination arg:prefix arg:local_metadata arguments arg arg arg arg For Call For Call If Call Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "ScopeType", + "source_code": "class ScopeType(enum.Enum):\n NO_SCOPE = 1\n VARIABLE_CREATION = 2\n NO_VARIABLE_CREATION = 3", + "docstring": "Enumerate scopes under which functions might be traced.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\tracing_compilation.py", + "ast_data": "ClassDef name:ScopeType Assign Assign Assign" + }, + { + "library": "scipy", + "name": "read_events", + "source_code": "def read_events(trace, options):\n trace_data = json.load(trace)\n\n def include_event(event, options):\n return event['ph'] == 'X' and event['dur'] >= options['granularity'] and (not event['name'].startswith('Total'))\n return [x for x in trace_data['traceEvents'] if include_event(x, options)]", + "docstring": "Reads all events from time-trace json file |trace|.", + "type": "function", + "file_path": "scipy\\tools\\ninjatracing.py", + "ast_data": "FunctionDef name:read_events arg:trace arg:options arguments arg arg Assign Call FunctionDef name:include_event arg:event arg:options arguments arg arg Return return:yes BoolOp Compare Compare Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "copy", + "source_code": "def copy(self, deep: bool | Literal['all']=True) -> Self:\n if deep:\n\n def copy_func(ax):\n return ax.copy(deep=True) if deep == 'all' else ax.view()\n new_axes = [copy_func(ax) for ax in self.axes]\n else:\n new_axes = [ax.view() for ax in self.axes]\n res = self.apply('copy', deep=deep)\n res.axes = new_axes\n if self.ndim > 1:\n blknos = self._blknos\n if blknos is not None:\n res._blknos = blknos.copy()\n res._blklocs = self._blklocs.copy()\n if deep:\n res._consolidate_inplace()\n return res", + "docstring": "Make deep or shallow copy of BlockManager Parameters 
---------- deep : bool, string or None, default True If False or None, return a shallow copy (do not copy data) If 'all', copy data and a deep copy of the index Returns ------- BlockManager", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:copy arg:self arg:deep arguments arg arg If FunctionDef name:copy_func arg:ax arguments arg Return return:yes Compare Call Call Assign Call Assign Call Assign Call Assign If Compare Assign If Compare Assign Call Assign Call If Call Return return:yes" + }, + { + "library": "scipy", + "name": "_ExponentialModel", + "source_code": "class _ExponentialModel(Model):\n\n def __init__(self):\n super().__init__(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb, estimate=_exp_est, meta={'name': 'Exponential', 'equ': 'y= B_0 + exp(B_1 * x)', 'TeXequ': '$y=\\\\beta_0 + e^{\\\\beta_1 x}$'})", + "docstring": "Exponential model This model is defined by :math: Examples -------- We can calculate orthogonal distance regression with an exponential model: >>> from scipy import odr >>> import numpy as np >>> x = np.linspace(0.0, 5.0) >>> y = -10.0 + np.exp(0.5*x) >>> data = odr.Data(x, y) >>> odr_obj = odr.ODR(data, odr.exponential) >>> output = odr_obj.run() >>> print(output.beta) [-10. 0.5]", + "type": "class", + "file_path": "scipy\\scipy\\odr\\_models.py", + "ast_data": "ClassDef name:_ExponentialModel FunctionDef name:__init__ arg:self arguments arg Call Call" + }, + { + "library": "scipy", + "name": "standardize_bounds", + "source_code": "def standardize_bounds(bounds, x0, meth):\n if meth in {'trust-constr', 'powell', 'nelder-mead', 'cobyla', 'cobyqa', 'new'}:\n if not isinstance(bounds, Bounds):\n lb, ub = old_bound_to_new(bounds)\n bounds = Bounds(lb, ub)\n elif meth in ('l-bfgs-b', 'tnc', 'slsqp', 'old'):\n if isinstance(bounds, Bounds):\n bounds = new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0])\n return bounds", + "docstring": "Converts bounds to the form required by the solver.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_minimize.py", + "ast_data": "FunctionDef name:standardize_bounds arg:bounds arg:x0 arg:meth arguments arg arg arg If Compare If Call Assign Call Assign Call If Compare If Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "ConditionalGetMiddleware", + "source_code": "class ConditionalGetMiddleware(MiddlewareMixin):\n\n def process_response(self, request, response):\n if request.method != 'GET':\n return response\n if self.needs_etag(response) and (not response.has_header('ETag')):\n set_response_etag(response)\n etag = response.get('ETag')\n last_modified = response.get('Last-Modified')\n last_modified = last_modified and parse_http_date_safe(last_modified)\n if etag or last_modified:\n return get_conditional_response(request, etag=etag, last_modified=last_modified, response=response)\n return response\n\n def needs_etag(self, response):\n cache_control_headers = cc_delim_re.split(response.get('Cache-Control', ''))\n return all((header.lower() != 'no-store' for header in cache_control_headers))", + "docstring": "Handle conditional GET operations. If the response has an ETag or Last-Modified header and the request has If-None-Match or If-Modified-Since, replace the response with HttpNotModified. 
Add an ETag header if needed.", + "type": "class", + "file_path": "django\\django\\middleware\\http.py", + "ast_data": "ClassDef name:ConditionalGetMiddleware FunctionDef name:process_response arg:self arg:request arg:response arguments arg arg arg If Compare Return return:yes If BoolOp Call Call Call Assign Call Assign Call Assign BoolOp Call If BoolOp Return return:yes Call Return return:yes FunctionDef name:needs_etag arg:self arg:response arguments arg arg Assign Call Call Return return:yes Call Compare Call" + }, + { + "library": "scipy", + "name": "sampling_subspace", + "source_code": "def sampling_subspace(self):\n for ind, g in enumerate(self.g_cons):\n feasible = np.array([np.all(g(x_C, *self.g_args[ind]) >= 0.0) for x_C in self.C], dtype=bool)\n self.C = self.C[feasible]\n if self.C.size == 0:\n self.res.message = 'No sampling point found within the ' + 'feasible set. Increasing sampling ' + 'size.'\n if self.disp:\n logging.info(self.res.message)", + "docstring": "Find subspace of feasible points from g_func definition", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_shgo.py", + "ast_data": "FunctionDef name:sampling_subspace arg:self arguments arg For Call Assign Call Call Compare Call Assign If Compare Assign If Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, atomic_fn: atomic_function.AtomicFunction, shared_func_graph=True):\n self._arg_keywords = None\n self._num_positional_args = None\n self._func_graph = atomic_fn.graph\n self._captured_inputs = self._func_graph.external_captures + self._func_graph.deferred_external_captures\n self._function_type = atomic_fn.function_type\n self._output_shapes = tuple((output.shape for output in self._func_graph.outputs))\n self._attrs = attributes_lib.parse_func_attrs(atomic_fn.attributes or {})\n if shared_func_graph:\n self._garbage_collector = None\n else:\n self._garbage_collector = ConcreteFunctionGarbageCollector(atomic_fn.graph)\n self._delayed_rewrite_functions = _DelayedRewriteGradientFunctions(atomic_fn, self._garbage_collector)\n self._first_order_tape_functions = {}\n self._higher_order_tape_functions = {}\n self._inference_function = self._delayed_rewrite_functions.forward()", + "docstring": "Initialize a . Args: atomic_fn: Inference atomic function to form basis of forward pass. shared_func_graph: If False, the ConcreteFunction takes ownership of and will break reference cycles when it is deleted. This makes the FuncGraph inoperable. Raises: ValueError: If number of input_placeholders is not equal to the number of function inputs.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:atomic_fn arg:shared_func_graph arguments arg arg arg Assign Assign Assign Assign Assign Assign Call Assign Call BoolOp If Assign Assign Call Assign Call Assign Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "ragged_rank", + "source_code": "@property\ndef ragged_rank(self):\n return self._ragged_rank", + "docstring": "The number of times the RaggedTensor's flat_values is partitioned. Defaults to . 
Examples: >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]]) >>> tf.type_spec_from_value(values).ragged_rank 1 >>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2) >>> tf.type_spec_from_value(rt1).ragged_rank 2 Returns: A Python indicating the number of times the underlying Tensor has been partitioned to add a new dimension. I.e., .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", + "ast_data": "FunctionDef name:ragged_rank arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "non_slot_devices", + "source_code": "def non_slot_devices(self, var_list):\n raise NotImplementedError('must be implemented in descendants')", + "docstring": "Device(s) for non-slot variables. DEPRECATED: TF 1.x ONLY. This method returns non-slot devices where non-slot variables are placed. Users can create non-slot variables on these devices by using a block: Args: var_list: The list of variables being optimized, needed with the default . Returns: A sequence of devices for non-slot variables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:non_slot_devices arg:self arg:var_list arguments arg arg Raise Call" + }, + { + "library": "django", + "name": "ask_rename_model", + "source_code": "def ask_rename_model(self, old_model_state, new_model_state):\n return self.defaults.get('ask_rename_model', False)", + "docstring": "Was this model really renamed?", + "type": "method", + "file_path": "django\\django\\db\\migrations\\questioner.py", + "ast_data": "FunctionDef name:ask_rename_model arg:self arg:old_model_state arg:new_model_state arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_op_in_graph_mode", + "source_code": "def _op_in_graph_mode(tensor):\n if context.executing_eagerly():\n return tensor\n return tensor.op", + "docstring": "Returns the tensor's op in graph mode, or the tensor in eager mode. This is useful because sometimes an op is needed in graph mode instead of a tensor. In eager mode, there are no ops. Args: tensor: A tensor. Returns: The tensor's op in graph mode. The tensor in eager mode.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:_op_in_graph_mode arg:tensor arguments arg If Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_convert_to_tensors_or_sparse_tensors", + "source_code": "def _convert_to_tensors_or_sparse_tensors(a, b):\n a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name='a')\n if a.dtype.base_dtype not in _VALID_DTYPES:\n raise TypeError(f\"'a' has invalid dtype `{a.dtype}` not in supported dtypes: `{_VALID_DTYPES}`.\")\n b = sparse_tensor.convert_to_tensor_or_sparse_tensor(b, name='b')\n if b.dtype.base_dtype != a.dtype.base_dtype:\n raise TypeError(\"Types don't match, %s vs %s.\" % (a.dtype, b.dtype))\n if isinstance(a, sparse_tensor.SparseTensor) and (not isinstance(b, sparse_tensor.SparseTensor)):\n return (b, a, True)\n return (a, b, False)", + "docstring": "Convert to tensor types, and flip order if necessary. Args: a: or of the same type as . b: or of the same type as . 
Returns: Tuple of , where and have been converted to or , and indicates whether the order has been flipped to make it dense,sparse instead of sparse,dense (since the set ops do not support the latter).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sets_impl.py", + "ast_data": "FunctionDef name:_convert_to_tensors_or_sparse_tensors arg:a arg:b arguments arg arg Assign Call If Compare Raise Call Assign Call If Compare Raise Call If BoolOp Call Call Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "_is_valid_na_for", + "source_code": "def _is_valid_na_for(self, dtype: DtypeObj) -> bool:\n if not self.is_na:\n return False\n blk = self.block\n if blk.dtype.kind == 'V':\n return True\n if blk.dtype == object:\n values = blk.values\n return all((is_valid_na_for_dtype(x, dtype) for x in values.ravel(order='K')))\n na_value = blk.fill_value\n if na_value is NaT and blk.dtype != dtype:\n return False\n if na_value is NA and needs_i8_conversion(dtype):\n return False\n return is_valid_na_for_dtype(na_value, dtype)", + "docstring": "Check that we are all-NA of a type/dtype that is compatible with this dtype. Augments with an additional check of the type of NA values.", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\concat.py", + "ast_data": "FunctionDef name:_is_valid_na_for arg:self arg:dtype arguments arg arg If Return return:yes Assign If Compare Return return:yes If Compare Assign Return return:yes Call Call Call Assign If BoolOp Compare Compare Return return:yes If BoolOp Compare Call Return return:yes Return return:yes Call" + }, + { + "library": "pandas", + "name": "apply_if_callable", + "source_code": "def apply_if_callable(maybe_callable, obj, **kwargs):\n if callable(maybe_callable):\n return maybe_callable(obj, **kwargs)\n return maybe_callable", + "docstring": "Evaluate possibly callable input using obj and kwargs if it is callable, otherwise return as it is. Parameters ---------- maybe_callable : possibly a callable obj : NDFrame **kwargs", + "type": "function", + "file_path": "pandas\\pandas\\core\\common.py", + "ast_data": "FunctionDef name:apply_if_callable arg:maybe_callable arg:obj arguments arg arg arg If Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "split", + "source_code": "def split(tensor, split_dimension, num_devices, assign_tuple_sharding=False, use_sharding_op=False, input_shape=None):\n return Sharding.split(tensor, split_dimension, num_devices, input_shape).apply_to_tensor(tensor, assign_tuple_sharding=assign_tuple_sharding, use_sharding_op=use_sharding_op)", + "docstring": "Returns a tensor that is split along the given dimension. Args: tensor: A tf.Tensor to split. split_dimension: The dimension to split. num_devices: The number of devices to partition the dimension. assign_tuple_sharding: If the sharding type should be a tuple. use_sharding_op: If true, adds a sharding op to set the sharding. 
input_shape: The full shape of the input tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py", + "ast_data": "FunctionDef name:split arg:tensor arg:split_dimension arg:num_devices arg:assign_tuple_sharding arg:use_sharding_op arg:input_shape arguments arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_state_shape", + "source_code": "def get_state_shape(s):\n c = _concat(batch_size, s)\n size = array_ops.zeros(c, dtype=dtype)\n if not context.executing_eagerly():\n c_static = _concat(batch_size, s, static=True)\n size.set_shape(c_static)\n return size", + "docstring": "Combine s with batch_size to get a proper tensor shape.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn_cell_impl.py", + "ast_data": "FunctionDef name:get_state_shape arg:s arguments arg Assign Call Assign Call If Call Assign Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "read_file_header", + "source_code": "def read_file_header(self):\n hdict = {}\n hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header']\n hdr = read_dtype(self.mat_stream, hdr_dtype)\n hdict['__header__'] = hdr['description'].item().strip(b' \\t\\n\\x00')\n v_major = hdr['version'] >> 8\n v_minor = hdr['version'] & 255\n hdict['__version__'] = f'{v_major}.{v_minor}'\n return hdict", + "docstring": "Read in mat 5 file header", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py", + "ast_data": "FunctionDef name:read_file_header arg:self arguments arg Assign Assign Assign Call Assign Call Call Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "benchmark_defun_vs_map_fn", + "source_code": "def benchmark_defun_vs_map_fn(self):\n\n @def_function.function(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])\n def defun(x):\n return array_ops.identity(x)\n\n def fn(x):\n return array_ops.identity(x)\n base = math_ops.range(10000)\n for input_size in [10, 100, 1000, 10000]:\n num_iters = 10000 // input_size\n map_defun_op = map_defun.map_defun(defun, [base], [dtypes.int32], [()])\n map_fn_op = map_fn.map_fn(fn, base)\n self._run(op=map_defun_op, name='with_defun_size_%d' % input_size, num_iters=num_iters, benchmark_id=1)\n self._run(op=map_fn_op, name='without_defun_size_%d' % input_size, num_iters=num_iters, benchmark_id=2)", + "docstring": "Benchmarks to compare the performance of MapDefun vs tf.map_fn.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\benchmarks\\map_defun_benchmark.py", + "ast_data": "FunctionDef name:benchmark_defun_vs_map_fn arg:self arguments arg FunctionDef name:defun arg:x arguments arg Return return:yes Call Call Call FunctionDef name:fn arg:x arguments arg Return return:yes Call Assign Call For Assign Assign Call Assign Call Call Call" + }, + { + "library": "django", + "name": "save_form", + "source_code": "def save_form(self, request, form, change):\n return form.save(commit=False)", + "docstring": "Given a ModelForm return an unsaved instance. 
`` is True if the object is being changed, and False if it's being added.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:save_form arg:self arg:request arg:form arg:change arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "synchronize", + "source_code": "def synchronize(store, data: bytes, rank: int, world_size: int, key_prefix: str, timeout: float=300) -> list[bytes]:\n with store_timeout(store, timeout):\n store.set(f'{key_prefix}{rank}', data)\n agent_data = get_all(store, rank, key_prefix, world_size)\n return agent_data", + "docstring": "Synchronizes `` will be available on each of the agents. Note: The data on the path is not deleted, as a result there can be stale data if you use the same key_prefix twice. Time complexity: O(N) per worker, O(N^2) globally.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\elastic\\utils\\store.py", + "ast_data": "FunctionDef name:synchronize arg:store arg:data arg:rank arg:world_size arg:key_prefix arg:timeout arguments arg arg arg arg arg arg With Call Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "check_in_bounds_for_storage", + "source_code": "def check_in_bounds_for_storage(a: torch.TypedStorage, shape: ShapeType, strides: StrideType, storage_offset: int):\n required_length = compute_required_storage_length(shape, strides, storage_offset)\n if a.size() < required_length:\n msg = f\"Can't view a storage of size {a.size()} with an offset of {storage_offset}, shape of {str(shape)}, and strides of {str(strides)}, which requires a storage of size {required_length}\"\n raise ValueError(msg)", + "docstring": "Determines if the given shape, strides, and offset are valid for the given storage.", + "type": "function", + "file_path": "pytorch\\torch\\_prims_common\\__init__.py", + "ast_data": "FunctionDef name:check_in_bounds_for_storage arg:a arg:shape arg:strides arg:storage_offset arguments arg arg arg arg Assign Call If Compare Call Assign Call Call Call Raise Call" + }, + { + "library": "scipy", + "name": "genhalflogistic_gen", + "source_code": "class genhalflogistic_gen(rv_continuous):\n\n def _shape_info(self):\n return [_ShapeInfo('c', False, (0, np.inf), (False, False))]\n\n def _get_support(self, c):\n return (self.a, 1.0 / c)\n\n def _pdf(self, x, c):\n limit = 1.0 / c\n tmp = np.asarray(1 - c * x)\n tmp0 = tmp ** (limit - 1)\n tmp2 = tmp0 * tmp\n return 2 * tmp0 / (1 + tmp2) ** 2\n\n def _cdf(self, x, c):\n limit = 1.0 / c\n tmp = np.asarray(1 - c * x)\n tmp2 = tmp ** limit\n return (1.0 - tmp2) / (1 + tmp2)\n\n def _ppf(self, q, c):\n return 1.0 / c * (1 - ((1.0 - q) / (1.0 + q)) ** c)\n\n def _entropy(self, c):\n return 2 - (2 * c + 1) * np.log(2)", + "docstring": "A generalized half-logistic continuous random variable. %(before_notes)s Notes ----- The probability density function for is: .. math:: f(x, c) = \\frac{2 (1 - c x)^{1/(c-1)}}{[1 + (1 - c x)^{1/c}]^2} for :math:, and :math:. takes `c`. 
%(after_notes)s %(example)s", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", + "ast_data": "ClassDef name:genhalflogistic_gen FunctionDef name:_shape_info arg:self arguments arg Return return:yes Call FunctionDef name:_get_support arg:self arg:c arguments arg arg Return return:yes FunctionDef name:_pdf arg:self arg:x arg:c arguments arg arg arg Assign Assign Call Assign Assign Return return:yes FunctionDef name:_cdf arg:self arg:x arg:c arguments arg arg arg Assign Assign Call Assign Return return:yes FunctionDef name:_ppf arg:self arg:q arg:c arguments arg arg arg Return return:yes FunctionDef name:_entropy arg:self arg:c arguments arg arg Return return:yes Call" + }, + { + "library": "scrapy", + "name": "find_by_request", + "source_code": "def find_by_request(self, request: Request) -> list[str]:\n return [name for name, cls in self._spiders.items() if cls.handles_request(request)]", + "docstring": "Return the list of spider names that can handle the given request.", + "type": "method", + "file_path": "scrapy\\scrapy\\spiderloader.py", + "ast_data": "FunctionDef name:find_by_request arg:self arg:request arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "init_cooperative_reduction", + "source_code": "def init_cooperative_reduction(self):\n assert self.cooperative_reduction\n for tree in self.range_trees:\n if tree.grid_dim is not None:\n tree.grid_dim += 1\n sem_count = self.numels['x']\n if self.fixed_config:\n sem_count = CeilDiv(sem_count, self.fixed_config['XBLOCK'])\n self.semaphores_name = self.args.semaphores(sem_count)\n self.cooperative_reduction_workspace_cache = CooperativeReductionWorkspaceCache(self.args)\n self.body.splice(' RSPLIT_NEXT_POWER_OF_2: tl.constexpr = triton_helpers.constexpr_next_power_of_2(RSPLIT)\\n RSPLIT_IS_POWER_OF_2: tl.constexpr = RSPLIT == RSPLIT_NEXT_POWER_OF_2\\n HAS_RSPLIT: tl.constexpr = RSPLIT > 1\\n rsplit_id = tl.program_id(0)\\n num_rblocks = (rnumel + RBLOCK - 1) // RBLOCK\\n rsplit_chunk = (num_rblocks + RSPLIT - 1) // RSPLIT * RBLOCK\\n rsplit_start = rsplit_chunk * rsplit_id\\n rsplit_end = rsplit_chunk * (rsplit_id + 1)\\n ')\n if any((not self._has_constant_mask(tree) for tree in self.range_trees if tree.is_reduction)):\n self.body.writeline('rsplit_end = tl.where(rsplit_end < rnumel, rsplit_end, rnumel)')", + "docstring": "One time setup code for cooperative reductions.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py", + "ast_data": "FunctionDef name:init_cooperative_reduction arg:self arguments arg For If Compare Assign If Assign Call Assign Call Assign Call Call If Call Call Call" + }, + { + "library": "tensorflow", + "name": "set_support_graph_mode_gradient", + "source_code": "def set_support_graph_mode_gradient(self):\n self._support_graph_mode_gradient = True", + "docstring": "Indicates the object shall support gradient ops. 
This function is internally used by _EagerPyFuncGrad to support graph mode gradient of EagerFunc via tf.gradient().", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py", + "ast_data": "FunctionDef name:set_support_graph_mode_gradient arg:self arguments arg Assign" + }, + { + "library": "pytorch", + "name": "_determine_output_number", + "source_code": "def _determine_output_number(signature: _schemas.OpSignature, named_attrs: Mapping[str, ValidAttributeType]) -> int:\n if signature.domain == '':\n if signature.name == 'BatchNormalization':\n if not named_attrs.get('training_mode', 0):\n return 1\n if signature.name == 'Split':\n num_outputs = named_attrs.get('num_outputs')\n if num_outputs is not None and isinstance(num_outputs, int):\n return num_outputs\n else:\n raise ValueError('Could not determine the number of outputs for Split. num_outputs must be provided')\n return len(signature.outputs)", + "docstring": "Determine the number of outputs for the node with heuristics.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_building.py", + "ast_data": "FunctionDef name:_determine_output_number arg:signature arg:named_attrs arguments arg arg If Compare If Compare If Call Return return:yes If Compare Assign Call If BoolOp Compare Call Return return:yes Raise Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "derivative", + "source_code": "def derivative(self, x, der=1):\n x, x_shape = self._prepare_x(x)\n y = self._evaluate_derivatives(x, der + 1)\n return self._finish_y(y[der], x_shape)", + "docstring": "Evaluate a single derivative of the polynomial at the point . Parameters ---------- x : array_like Point or points at which to evaluate the derivatives der : integer, optional Which derivative to evaluate (default: first derivative). This number includes the function value as 0th derivative. Returns ------- d : ndarray Derivative interpolated at the x-points. Shape of is determined by replacing the interpolation axis in the original array with the shape of . Notes ----- This may be computed by evaluating all derivatives up to the desired one (using self.derivatives()) and then discarding the rest.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_polyint.py", + "ast_data": "FunctionDef name:derivative arg:self arg:x arg:der arguments arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "stop", + "source_code": "def stop(self, *args: Any) -> None:\n self.exit_event.set()", + "docstring": "Exits the program gracefully. 
this shuts down the logging loop.", + "type": "method", + "file_path": "pytorch\\tools\\stats\\monitor.py", + "ast_data": "FunctionDef name:stop arg:self arguments arg arg Call" + }, + { + "library": "django", + "name": "acycle_key", + "source_code": "async def acycle_key(self):\n data = await self._aget_session()\n key = self.session_key\n await self.acreate()\n self._session_cache = data\n if key:\n await self.adelete(key)", + "docstring": "Create a new session key, while retaining the current session data.", + "type": "method", + "file_path": "django\\django\\contrib\\sessions\\backends\\base.py", + "ast_data": "AsyncFunctionDef name:acycle_key arg:self arguments arg Assign Call Assign Call Assign If Call" + }, + { + "library": "tensorflow", + "name": "_substitute_quantized_function_name_template", + "source_code": "def _substitute_quantized_function_name_template(module: str) -> str:\n compiled_regex = re.compile('GenerateQuantizedFunctionName(\\\\([\\\\w\\\\s\\\\\\'\\\\\"\\\\[\\\\],]+\\\\))')\n while True:\n func_match = re.search(compiled_regex, module)\n if func_match is None:\n break\n argument_string = func_match.group(1)\n if not argument_string.endswith(',)'):\n argument_string = argument_string[:-1] + ',)'\n arguments = ast.literal_eval(argument_string)\n if len(arguments) < 1 or len(arguments) > 2:\n raise ValueError('Wrong number of arguments to GenerateQuantizedFunctionName')\n quantized_ops = arguments[0]\n if not quantized_ops:\n raise ValueError('The quantized_ops list must not be empty')\n function_name = 'quantized_{}'.format(_format_snake_case_op_name(quantized_ops[0]))\n if len(quantized_ops) > 1:\n function_name += '_with_{}'.format(_format_snake_case_op_name(quantized_ops[1]))\n if len(quantized_ops) > 1:\n for quantized_op in quantized_ops[2:]:\n function_name += '_and_{}'.format(_format_snake_case_op_name(quantized_op))\n suffix = '_fn'\n if len(arguments) > 1 and arguments[1] == 'f32':\n suffix = '_float_output_fn'\n function_name += suffix\n module = re.sub(compiled_regex, function_name, module, count=1)\n return module", + "docstring": "Generates the quantized function name.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\gen_quantized_function_library.py", + "ast_data": "FunctionDef name:_substitute_quantized_function_name_template arg:module arguments arg Assign Call While Assign Call If Compare Assign Call If Call Assign Assign Call If BoolOp Compare Call Compare Call Raise Call Assign If Raise Call Assign Call Call If Compare Call Call Call If Compare Call For Call Call Assign If BoolOp Compare Call Compare Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "ShardingCallback", + "source_code": "@tf_export.tf_export('train.experimental.ShardingCallback')\nclass ShardingCallback(abc.ABC):\n\n @property\n @abc.abstractmethod\n def description(self) -> str:\n pass\n\n @abc.abstractmethod\n def __call__(self, shardable_tensors: Sequence[ShardableTensor]) -> Sequence[Shard]:\n pass\n\n def __hash__(self) -> int:\n hash_val = hash(self.description)\n for attr_name, attr_val in vars(self).items():\n if not (inspect.ismethod(attr_val) or inspect.isfunction(attr_val)):\n hash_val ^= hash(attr_name)\n if isinstance(attr_val, Hashable):\n hash_val ^= hash(attr_val)\n return hash_val", + "docstring": "Checkpoint sharding callback function, along with a text description. 
A callback function wrapper that will be executed to determine how tensors will be split into shards when the saver writes the checkpoint shards to disk. The callback takes a list of s as input (as well as any kwargs defined by the subclass), and organizes the input tensors into different shards. Tensors are first organized by device task (see ), then the callback will be called for each collection of tensors. There are a few restrictions to keep in mind when creating a custom callback: - Tensors must not be removed from the checkpoint. - Tensors must not be reshaped. - Tensor dtypes must not change. - Tensors within a shard must belong to the same task. Validation checks will be performed after the callback function is executed to ensure these restrictions aren't violated. Here's an example of a simple custom callback: The attribute is used to identify the callback and to aid debugging during saving and restoration. To take in kwargs, simply define the constructor and pass them in:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\sharding\\sharding_util.py", + "ast_data": "ClassDef name:ShardingCallback FunctionDef name:description arg:self arguments arg FunctionDef name:__call__ arg:self arg:shardable_tensors arguments arg arg FunctionDef name:__hash__ arg:self arguments arg Assign Call For Call Call If BoolOp Call Call Call If Call Call Return return:yes Call" + }, + { + "library": "authlib", + "name": "validate_revocation_endpoint_auth_methods_supported", + "source_code": "def validate_revocation_endpoint_auth_methods_supported(self):\n validate_array_value(self, 'revocation_endpoint_auth_methods_supported')", + "docstring": "OPTIONAL. JSON array containing a list of client authentication methods supported by this revocation endpoint. The valid client authentication method values are those registered in the IANA \"OAuth Token Endpoint Authentication Methods\" registry [IANA.OAuth.Parameters]. If omitted, the default is \"client_secret_basic\" -- the HTTP Basic Authentication Scheme specified in Section 2.3.1 of OAuth 2.0 [RFC6749].", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py", + "ast_data": "FunctionDef name:validate_revocation_endpoint_auth_methods_supported arg:self arguments arg Call" + }, + { + "library": "matplotlib", + "name": "set_pad", + "source_code": "def set_pad(self, pad):\n self._pad = pad", + "docstring": "Set the internal pad in points. The actual pad will be the sum of the internal pad and the external pad (the latter is set automatically by the ). Parameters ---------- pad : float The internal pad in points.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py", + "ast_data": "FunctionDef name:set_pad arg:self arg:pad arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "_add_sparse_to_tensors_map", + "source_code": "def _add_sparse_to_tensors_map(sp_input, container=None, shared_name=None, name=None):\n sp_input = _convert_to_sparse_tensor(sp_input)\n return gen_sparse_ops.add_sparse_to_tensors_map(sp_input.indices, sp_input.values, sp_input.dense_shape, container=container, shared_name=shared_name, name=name)", + "docstring": "Add a to a and return its handle. Args: sp_input: The input . container: The container for the underlying (optional). shared_name: The shared name for the underlying (optional, defaults to the name of the newly created op). name: A name prefix for the returned tensors (optional). 
Returns: A string 1-vector (1D ), with the single element representing the a unique handle to a stored by the underlying this op. Raises: TypeError: If is not a .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", + "ast_data": "FunctionDef name:_add_sparse_to_tensors_map arg:sp_input arg:container arg:shared_name arg:name arguments arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_lr", + "source_code": "@override\ndef get_lr(self) -> list[float]:\n _warn_get_lr_called_within_step(self)\n return [base_lr * lmbda(self.last_epoch) for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs)]", + "docstring": "Compute learning rate.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "FunctionDef name:get_lr arg:self arguments arg Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "max_pool3d_with_indices", + "source_code": "def max_pool3d_with_indices(input: Tensor, kernel_size: BroadcastingList3[int], stride: Optional[BroadcastingList3[int]]=None, padding: BroadcastingList3[int]=0, dilation: BroadcastingList3[int]=1, ceil_mode: bool=False, return_indices: bool=False) -> tuple[Tensor, Tensor]:\n if has_torch_function_unary(input):\n return handle_torch_function(max_pool3d_with_indices, (input,), input, kernel_size, stride=stride, padding=padding, dilation=dilation, ceil_mode=ceil_mode, return_indices=return_indices)\n if stride is None:\n stride = torch.jit.annotate(list[int], [])\n return torch._C._nn.max_pool3d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode)", + "docstring": "max_pool3d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False) Applies a 3D max pooling over an input signal composed of several input planes. .. note:: The order of :attr: and :attr: is different from what seen in :class:, and will change in a future release. See :class: for details. Args: input: input tensor :math:, minibatch dim optional. kernel_size: size of the pooling region. Can be a single number or a tuple stride: stride of the pooling operation. Can be a single number or a tuple . Default: :attr: padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and 0. 
ceil_mode: If `ceilfloortorch.nn.functional.max_unpool3d` later", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:max_pool3d_with_indices arg:input arg:kernel_size arg:stride arg:padding arg:dilation arg:ceil_mode arg:return_indices arguments arg arg arg arg arg arg arg If Call Return return:yes Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_labels", + "source_code": "def get_labels(github_repo: str, github_token: str, pr_number: int) -> set[str]:\n pr_info = get_pr_info(github_repo, github_token, pr_number)\n return {label.get('name') for label in pr_info.get('labels', []) if label.get('name')}", + "docstring": "Dynamically get the latest list of labels from the pull request", + "type": "function", + "file_path": "pytorch\\.github\\scripts\\runner_determinator.py", + "ast_data": "FunctionDef name:get_labels arg:github_repo arg:github_token arg:pr_number arguments arg arg arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "_ensure_in_unit_hypercube", + "source_code": "def _ensure_in_unit_hypercube(sample: 'npt.ArrayLike') -> np.ndarray:\n sample = np.asarray(sample, dtype=np.float64, order='C')\n if not sample.ndim == 2:\n raise ValueError('Sample is not a 2D array')\n if sample.max() > 1.0 or sample.min() < 0.0:\n raise ValueError('Sample is not in unit hypercube')\n return sample", + "docstring": "Ensure that sample is a 2D array and is within a unit hypercube Parameters ---------- sample : array_like (n, d) A 2D array of points. Returns ------- np.ndarray The array interpretation of the input sample Raises ------ ValueError If the input is not a 2D array or contains points outside of a unit hypercube.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_qmc.py", + "ast_data": "FunctionDef name:_ensure_in_unit_hypercube arg:sample arguments arg Assign Call If Compare Raise Call If BoolOp Compare Call Compare Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n return self._name", + "docstring": "Returns the (optionally provided) name of the described tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "geom_type", + "source_code": "@property\ndef geom_type(self):\n return capi.geos_type(self.ptr).decode()", + "docstring": "Return a string representing the Geometry type, e.g. 'Polygon'", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:geom_type arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_find_index_of_defining_frame", + "source_code": "def _find_index_of_defining_frame(tb):\n size = len(tb)\n filenames = [frame.filename for frame in tb]\n for idx, filename in enumerate(reversed(filenames)):\n is_framework = _is_framework_filename(filename)\n if not is_framework:\n return size - idx - 1\n return 0", + "docstring": "Return index in op.traceback with first 'useful' frame. This method reads through the stack stored in op.traceback looking for the innermost frame which (hopefully) belongs to the caller. It accomplishes this by rejecting frames deemed to be part of the TensorFlow framework (by pattern matching the filename). 
Args: tb: A list of traceback frames (as from Operation.traceback). Returns: Integer index into op.traceback where the first non-TF file was found (innermost to outermost), or 0 (for the outermost stack frame) if all files came from TensorFlow.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\error_interpolation.py", + "ast_data": "FunctionDef name:_find_index_of_defining_frame arg:tb arguments arg Assign Call Assign For Call Call Assign Call If Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "assoc_in", + "source_code": "def assoc_in(d, keys, value, factory=dict):\n return update_in(d, keys, lambda x: value, value, factory)", + "docstring": "Return a new dict with new, potentially nested, key value pair >>> purchase = { ... \"name\": \"Alice\", ... \"order\": {\"items\": [\"Apple\", \"Orange\"], \"costs\": [0.50, 1.25]}, ... \"credit card\": \"5555-1234-1234-1234\", ... } >>> assoc_in(purchase, [\"order\", \"costs\"], [0.25, 1.00]) # doctest: +SKIP {'credit card': '5555-1234-1234-1234', 'name': 'Alice', 'order': {'costs': [0.25, 1.00], 'items': ['Apple', 'Orange']}}", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py", + "ast_data": "FunctionDef name:assoc_in arg:d arg:keys arg:value arg:factory arguments arg arg arg arg Return return:yes Call arguments arg" + }, + { + "library": "pandas", + "name": "_equals_tag", + "source_code": "def _equals_tag(self, obj, tag) -> bool:\n raise AbstractMethodError(self)", + "docstring": "Return whether an individual DOM node matches a tag Parameters ---------- obj : node-like A DOM node. tag : str Tag name to be checked for equality. Returns ------- boolean Whether 's tag name is", + "type": "method", + "file_path": "pandas\\pandas\\io\\html.py", + "ast_data": "FunctionDef name:_equals_tag arg:self arg:obj arg:tag arguments arg arg arg Raise Call" + }, + { + "library": "kornia", + "name": "unproject", + "source_code": "def unproject(self, points: Vector2, depth: Tensor | float) -> Vector3:\n if isinstance(depth, (float, int)):\n depth = Tensor([depth])\n return Vector3.from_coords(points.x * depth, points.y * depth, depth)", + "docstring": "Unproject one or more Vector2 from the canonical z=1 plane into the camera frame. Args: points: Vector2 representing the points to unproject. depth: Tensor representing the depth of the points to unproject. Returns: Vector3 representing the unprojected points. Example: >>> points = Vector2.from_coords(1., 2.) >>> Z1Projection().unproject(points, 3) x: tensor([3.]) y: tensor([6.]) z: tensor([3.])", + "type": "method", + "file_path": "kornia\\kornia\\sensors\\camera\\projection_model.py", + "ast_data": "FunctionDef name:unproject arg:self arg:points arg:depth arguments arg arg arg If Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "sharded_type_as_check", + "source_code": "def sharded_type_as_check(*args, **kwargs):\n if len(args) < 2:\n raise ValueError('Needs to give a tensor to cast type as!')\n if not isinstance(args[1], torch.Tensor) and (not isinstance(args[1], ShardedTensor)):\n raise ValueError('Needs to give a Tensor or ShardedTensor to cast type as!')", + "docstring": "Perform extra checks for the sharded_type_as op such as the input needs to be either a Tensor or ShardedTensor. Args: same as ``. 
Return: None", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\_ops\\tensor_ops.py", + "ast_data": "FunctionDef name:sharded_type_as_check arguments arg arg If Compare Call Raise Call If BoolOp Call Call Raise Call" + }, + { + "library": "scipy", + "name": "_default_encoded_fill_value", + "source_code": "def _default_encoded_fill_value(self):\n nc_type = REVERSE[self.typecode(), self.itemsize()]\n return FILLMAP[nc_type]", + "docstring": "The default encoded fill-value for this Variable's data type.", + "type": "method", + "file_path": "scipy\\scipy\\io\\_netcdf.py", + "ast_data": "FunctionDef name:_default_encoded_fill_value arg:self arguments arg Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "codegen_body", + "source_code": "def codegen_body(self) -> None:\n if self.multistage_reduction_entry:\n with self.body.indent():\n self.body.splice(self.loads)\n self.body.splice(self.compute)\n self.body.writeline('}')\n self.cse.invalidate(OrderedSet(self.cse.reduction_cache.values()))\n self.multistage_reduction_entry.cache_clear()\n self.multistage_reduction_entry = None\n else:\n self.body.splice(self.loads)\n self.body.splice(self.compute)\n self.body.splice(self.stores)\n self.loads.clear()\n self.compute.clear()\n self.stores.clear()", + "docstring": "Concat output code from index_code, loads, compute, stores, suffix into self.body. For pointwise kernels, this is called just once at the end. For reduction kernels, this generates a loop over the reduction axis.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\mps.py", + "ast_data": "FunctionDef name:codegen_body arg:self arguments arg If With Call Call Call Call Call Call Call Call Assign Call Call Call Call Call Call" + }, + { + "library": "pandas", + "name": "set_object_info", + "source_code": "def set_object_info(self) -> None:\n self.attrs.pandas_type = str(self.pandas_kind)\n self.attrs.pandas_version = str(_version)", + "docstring": "set my pandas type & version", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:set_object_info arg:self arguments arg Assign Call Assign Call" + }, + { + "library": "django", + "name": "get_valid_filename", + "source_code": "@keep_lazy_text\ndef get_valid_filename(name):\n s = str(name).strip().replace(' ', '_')\n s = re.sub('(?u)[^-\\\\w.]', '', s)\n if s in {'', '.', '..'}:\n raise SuspiciousFileOperation(\"Could not derive file name from '%s'\" % name)\n return s", + "docstring": "Return the given string converted to a string that can be used for a clean filename. Remove leading and trailing spaces; convert other spaces to underscores; and remove anything that is not an alphanumeric, dash, underscore, or dot. >>> get_valid_filename(\"john's portrait in 2004.jpg\") 'johns_portrait_in_2004.jpg'", + "type": "function", + "file_path": "django\\django\\utils\\text.py", + "ast_data": "FunctionDef name:get_valid_filename arg:name arguments arg Assign Call Call Call Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "rsample", + "source_code": "def rsample(self, sample_shape: _size=torch.Size()) -> Tensor:\n x = self.base_dist.rsample(sample_shape)\n for transform in self.transforms:\n x = transform(x)\n return x", + "docstring": "Generates a sample_shape shaped reparameterized sample or sample_shape shaped batch of reparameterized samples if the distribution parameters are batched. 
Samples first from base distribution and applies for every transform in the list.", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\transformed_distribution.py", + "ast_data": "FunctionDef name:rsample arg:self arg:sample_shape arguments arg arg Call Assign Call For Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "register", + "source_code": "def register(self, op):\n if not hasattr(op, FALLBACK_DISPATCH_ATTR):\n raise AssertionError('Dispatching not enabled for %s' % op)\n getattr(op, FALLBACK_DISPATCH_ATTR).append(self)", + "docstring": "Register this dispatcher as a handler for . Args: op: Python function: the TensorFlow operation that should be handled. Must have a dispatch list (which is added automatically for generated ops, and can be added to Python ops using the decorator).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", + "ast_data": "FunctionDef name:register arg:self arg:op arguments arg arg If Call Raise Call Call Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, axis, *, base=10, subs=None, nonpositive='clip'):\n self._transform = LogTransform(base, nonpositive)\n self.subs = subs", + "docstring": "Parameters ---------- axis : The axis for the scale. base : float, default: 10 The base of the logarithm. nonpositive : {'clip', 'mask'}, default: 'clip' Determines the behavior for non-positive values. They can either be masked as invalid, or clipped to a very small positive number. subs : sequence of int, default: None Where to place the subticks between each major tick. For example, in a log10 scale, `` will place 8 logarithmically spaced minor ticks between each major tick.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\scale.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:axis arguments arg arg arg arg arg Assign Call Assign" + }, + { + "library": "matplotlib", + "name": "set_url", + "source_code": "def set_url(self, url):\n super().set_url(url)\n self.label1.set_url(url)\n self.label2.set_url(url)\n self.stale = True", + "docstring": "Set the url of label1 and label2. 
Parameters ---------- url : str", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:set_url arg:self arg:url arguments arg arg Call Call Call Call Assign" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, initial_loss_scale, growth_steps, multiplier):\n super(_DynamicLossScaleState, self).__init__()\n self._initial_loss_scale = float(initial_loss_scale)\n self._growth_steps = int(growth_steps)\n self._multiplier = float(multiplier)\n self._weights = {}\n self._current_loss_scale = self._add_weight(name='current_loss_scale', dtype=dtypes.float32, initial_value=self._initial_loss_scale)\n self._counter = self._add_weight(name='good_steps', dtype=dtypes.int64, initial_value=0)", + "docstring": "Creates the dynamic loss scale.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:initial_loss_scale arg:growth_steps arg:multiplier arguments arg arg arg arg Call Call Assign Call Assign Call Assign Call Assign Assign Call Assign Call" + }, + { + "library": "kornia", + "name": "spatial_softmax2d", + "source_code": "def spatial_softmax2d(input: Tensor, temperature: Optional[Tensor]=None) -> Tensor:\n _validate_batched_image_tensor_input(input)\n batch_size, channels, height, width = input.shape\n if temperature is None:\n temperature = torch.tensor(1.0)\n temperature = temperature.to(device=input.device, dtype=input.dtype)\n x = input.view(batch_size, channels, -1)\n x_soft = softmax(x * temperature, dim=-1)\n return x_soft.view(batch_size, channels, height, width)", + "docstring": "Apply the Softmax function over features in each image channel. Note that this function behaves differently to :py:class:, which instead applies Softmax over features at each spatial location. Args: input: the input tensor with shape :math:. temperature: factor to apply to input, adjusting the \"smoothness\" of the output distribution. Returns: a 2D probability distribution per image channel with shape :math:. Examples: >>> heatmaps = torch.tensor([[[ ... [0., 0., 0.], ... [0., 0., 0.], ... 
[0., 1., 2.]]]]) >>> spatial_softmax2d(heatmaps) tensor([[[[0.0585, 0.0585, 0.0585], [0.0585, 0.0585, 0.0585], [0.0585, 0.1589, 0.4319]]]])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\subpix\\dsnt.py", + "ast_data": "FunctionDef name:spatial_softmax2d arg:input arg:temperature arguments arg arg Call Assign If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "TPULazyDistributedVariable", + "source_code": "class TPULazyDistributedVariable(TPUDistributedVariable):\n\n def _initialize_if_uninitialized(self):\n if getattr(self, '_is_lazily_initialized', False):\n return\n self._lazy_scope.initialize_all()\n self._is_lazily_initialized = True\n\n def assign_sub(self, value, use_locking=False, name=None, read_value=True):\n self._initialize_if_uninitialized()\n return super().assign_sub(value, use_locking, name, read_value)\n\n def assign_add(self, value, use_locking=False, name=None, read_value=True):\n self._initialize_if_uninitialized()\n return super().assign_add(value, use_locking, name, read_value)\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n self._initialize_if_uninitialized()\n return super().assign(value, use_locking, name, read_value)\n\n def read_value(self):\n self._initialize_if_uninitialized()\n return super().read_value()", + "docstring": "TPU Mirrored variable to be initialized lazily in a batch.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_values.py", + "ast_data": "ClassDef name:TPULazyDistributedVariable FunctionDef name:_initialize_if_uninitialized arg:self arguments arg If Call Return return:no Call Assign FunctionDef name:assign_sub arg:self arg:value arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Call Return return:yes Call Call FunctionDef name:assign_add arg:self arg:value arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Call Return return:yes Call Call FunctionDef name:assign arg:self arg:value arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg Call Return return:yes Call Call FunctionDef name:read_value arg:self arguments arg Call Return return:yes Call Call" + }, + { + "library": "seaborn", + "name": "_filter_params", + "source_code": "def _filter_params(self, params: dict[str, Any]) -> dict[str, Any]:\n return {k: v for k, v in params.items() if any((k.startswith(p) for p in self.THEME_GROUPS))}", + "docstring": "Restruct to thematic rc params.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\plot.py", + "ast_data": "FunctionDef name:_filter_params arg:self arg:params arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "normalize", + "source_code": "def normalize(self):\n n = np.sqrt(self.norm)\n return self.__class__(self.scalar / n, self.vector / n)", + "docstring": "Scaling such that norm equals 1", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:normalize arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "quote", + "source_code": "def quote(s):\n return s.translate(QUOTE_MAP) if isinstance(s, str) else s", + "docstring": "Ensure that primary key values do not confuse the admin URLs by escaping any '/', '_' and ':' and similarly problematic characters. 
Similar to urllib.parse.quote(), except that the quoting is slightly different so that it doesn't get automatically unquoted by the web browser.", + "type": "function", + "file_path": "django\\django\\contrib\\admin\\utils.py", + "ast_data": "FunctionDef name:quote arg:s arguments arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "ty", + "source_code": "@ty.setter\ndef ty(self, value: Union[Tensor, float]) -> 'PinholeCamera':\n self.extrinsics[..., 1, -1] = value\n return self", + "docstring": "Set the y-coordinate of the translation vector with the given value.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", + "ast_data": "FunctionDef name:ty arg:self arg:value arguments arg arg Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "file_generator_limited", + "source_code": "def file_generator_limited(fileobj, count, chunk_size=65536):\n remaining = count\n while remaining > 0:\n chunk = fileobj.read(min(chunk_size, remaining))\n chunklen = len(chunk)\n if chunklen == 0:\n return\n remaining -= chunklen\n yield chunk", + "docstring": "Yield the given file object in chunks. Stopps after bytes has been emitted. Default chunk size is 64kB. (Core)", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\__init__.py", + "ast_data": "FunctionDef name:file_generator_limited arg:fileobj arg:count arg:chunk_size arguments arg arg arg Assign While Compare Assign Call Call Assign Call If Compare Return return:no" + }, + { + "library": "pytorch", + "name": "state_dict", + "source_code": "@override\ndef state_dict(self) -> dict[str, Any]:\n state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', '_schedulers')}\n state_dict['_schedulers'] = [None] * len(self._schedulers)\n for idx, s in enumerate(self._schedulers):\n state_dict['_schedulers'][idx] = s.state_dict()\n return state_dict", + "docstring": "Return the state of the scheduler as a :class:. It contains an entry for every variable in self.__dict__ which is not the optimizer. The wrapped scheduler states will also be saved.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "FunctionDef name:state_dict arg:self arguments arg Assign Call Compare Assign Call For Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "pad_v2", + "source_code": "@tf_export('pad', v1=[])\n@dispatch.add_dispatch_support\ndef pad_v2(tensor, paddings, mode='CONSTANT', constant_values=0, name=None):\n return pad(tensor, paddings, mode, name, constant_values)", + "docstring": "Pads a tensor. This operation pads a according to the you specify. is an integer tensor with shape , where n is the rank of . For each dimension D of , indicates how many values to add before the contents of in that dimension, and indicates how many values to add after the contents of in that dimension. If is \"REFLECT\" then both and must be no greater than . If is \"SYMMETRIC\" then both and must be no greater than . The padded size of each dimension D of the output is: For example: Args: tensor: A . paddings: A of type . mode: One of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\" (case-insensitive) constant_values: In \"CONSTANT\" mode, the scalar pad value to use. Must be same type as . name: A name for the operation (optional). Returns: A . Has the same type as . 
Raises: ValueError: When mode is not one of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", + "ast_data": "FunctionDef name:pad_v2 arg:tensor arg:paddings arg:mode arg:constant_values arg:name arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_RandomStates", + "source_code": "class _RandomStates(_Constraint):\n\n def __init__(self):\n super().__init__()\n self._constraints = [Interval(Integral, 0, 2 ** 32 - 1, closed='both'), _InstancesOf(np.random.RandomState), _NoneConstraint()]\n\n def is_satisfied_by(self, val):\n return any((c.is_satisfied_by(val) for c in self._constraints))\n\n def __str__(self):\n return f'{', '.join([str(c) for c in self._constraints[:-1]])} or {self._constraints[-1]}'", + "docstring": "Constraint representing random states. Convenience class for [Interval(Integral, 0, 2**32 - 1, closed=\"both\"), np.random.RandomState, None]", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py", + "ast_data": "ClassDef name:_RandomStates FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call Call Call FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes Call Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "domain_dimension", + "source_code": "@property\ndef domain_dimension(self):\n if self.shape.rank is None:\n return tensor_shape.Dimension(None)\n else:\n return self.shape.dims[-1]", + "docstring": "Dimension (in the sense of vector spaces) of the domain of this operator. If this operator acts like the batch matrix with , then this returns . Returns: object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:domain_dimension arg:self arguments arg If Compare Return return:yes Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_feature_names_out", + "source_code": "def get_feature_names_out(self, input_features=None):\n check_is_fitted(self, 'n_features_in_')\n input_features = _check_feature_names_in(self, input_features)\n if hasattr(self, '_encoder'):\n return self._encoder.get_feature_names_out(input_features)\n return input_features", + "docstring": "Get output feature names. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_discretization.py", + "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "pandas", + "name": "all", + "source_code": "@final\n@Substitution(name='groupby')\n@Substitution(see_also=_common_see_also)\ndef all(self, skipna: bool=True) -> NDFrameT:\n return self._cython_agg_general('all', alt=lambda x: Series(x, copy=False).all(skipna=skipna), skipna=skipna)", + "docstring": "Return True if all values in the group are truthful, else False. Parameters ---------- skipna : bool, default True Flag to ignore nan values during truth testing. 
Returns ------- Series or DataFrame DataFrame or Series of boolean values, where a value is True if all elements are True within its respective group, False otherwise. %(see_also)s Examples -------- For SeriesGroupBy: >>> lst = [\"a\", \"a\", \"b\"] >>> ser = pd.Series([1, 2, 0], index=lst) >>> ser a 1 a 2 b 0 dtype: int64 >>> ser.groupby(level=0).all() a True b False dtype: bool For DataFrameGroupBy: >>> data = [[1, 0, 3], [1, 5, 6], [7, 8, 9]] >>> df = pd.DataFrame( ... data, columns=[\"a\", \"b\", \"c\"], index=[\"ostrich\", \"penguin\", \"parrot\"] ... ) >>> df a b c ostrich 1 0 3 penguin 1 5 6 parrot 7 8 9 >>> df.groupby(by=[\"a\"]).all() b c a 1 False True 7 True True", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\groupby.py", + "ast_data": "FunctionDef name:all arg:self arg:skipna arguments arg arg Return return:yes Call arguments arg Call Call Call Call" + }, + { + "library": "pytorch", + "name": "OutputLogger", + "source_code": "class OutputLogger(Logger):\n\n def __init__(self):\n super().__init__()\n self.stats['tensor_val'] = []\n\n def forward(self, x):\n self.stats['tensor_val'].append(x)\n return x", + "docstring": "Class used to log the outputs of the module", + "type": "class", + "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite.py", + "ast_data": "ClassDef name:OutputLogger FunctionDef name:__init__ arg:self arguments arg Call Call Assign FunctionDef name:forward arg:self arg:x arguments arg arg Call Return return:yes" + }, + { + "library": "numpy", + "name": "isfunction", + "source_code": "def isfunction(object):\n return isinstance(object, types.FunctionType)", + "docstring": "Return true if the object is a user-defined function. Function objects provide these attributes: __doc__ documentation string __name__ name with which this function was defined func_code code object containing compiled function bytecode func_defaults tuple of any default values for arguments func_doc (same as __doc__) func_globals global namespace in which this function was defined func_name (same as __name__)", + "type": "function", + "file_path": "numpy\\numpy\\_utils\\_inspect.py", + "ast_data": "FunctionDef name:isfunction arg:object arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_TensorConversionFunction", + "source_code": "@staticmethod\ndef _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):\n _ = name\n if dtype and (not dtype.is_compatible_with(v.dtype)):\n raise ValueError(\"Incompatible type conversion requested to type '%s' for variable of type '%s'\" % (dtype.name, v.dtype.name))\n if as_ref:\n return v._ref()\n else:\n return v.value()", + "docstring": "Utility function for converting a Variable to a Tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", + "ast_data": "FunctionDef name:_TensorConversionFunction arg:v arg:dtype arg:name arg:as_ref arguments arg arg arg arg Assign If BoolOp Call Raise Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "authlib", + "name": "create_json_request", + "source_code": "def create_json_request(self, request) -> JsonRequest:\n raise NotImplementedError()", + "docstring": "This method MUST be implemented in framework integrations. It is used to create an HttpRequest instance. 
:param request: the \"request\" instance in framework :return: HttpRequest instance", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py", + "ast_data": "FunctionDef name:create_json_request arg:self arg:request arguments arg arg Raise Call" + }, + { + "library": "matplotlib", + "name": "_make_verts", + "source_code": "def _make_verts(self, t, f1, f2, where):\n self._validate_shapes(self.t_direction, self._f_direction, t, f1, f2)\n where = self._get_data_mask(t, f1, f2, where)\n t, f1, f2 = np.broadcast_arrays(np.atleast_1d(t), f1, f2, subok=True)\n self._bbox = transforms.Bbox.null()\n self._bbox.update_from_data_xy(self._fix_pts_xy_order(np.concatenate([np.stack((t[where], f[where]), axis=-1) for f in (f1, f2)])))\n return [self._make_verts_for_region(t, f1, f2, idx0, idx1) for idx0, idx1 in cbook.contiguous_regions(where)]", + "docstring": "Make verts that can be forwarded to .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:_make_verts arg:self arg:t arg:f1 arg:f2 arg:where arguments arg arg arg arg arg Call Assign Call Assign Call Call Assign Call Call Call Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "flatten", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef flatten(x):\n return array_ops.reshape(x, [-1])", + "docstring": "Flatten a tensor. Args: x: A tensor or variable. Returns: A tensor, reshaped into 1-D Example: >>> b = tf.constant([[1, 2], [3, 4]]) >>> b >>> tf.keras.backend.flatten(b)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:flatten arg:x arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "initialize_inference_session", + "source_code": "def initialize_inference_session(self, initializer: Callable[[str | bytes], ort.InferenceSession]=_ort_session_initializer) -> None:\n logger.debug('Initializing the inference session.')\n if (byte_size := _count_initializer_size(self.model.graph)) > _LARGE_MODEL_THRESHOLD:\n logger.debug('The model initializers is larger than 1.5GB (%s).', byte_size)\n self._tempdir = tempfile.TemporaryDirectory(ignore_cleanup_errors=True)\n model_path = os.path.join(self._tempdir.name, 'model.onnx')\n self.save(model_path, external_data=True)\n model = model_path\n else:\n model = self.model_proto.SerializeToString()\n self._inference_session = initializer(model)\n logger.debug('Inference session initialized.')", + "docstring": "Initialize the ONNX Runtime inference session. Args: initializer: The function to initialize the ONNX Runtime inference session with the specified model. 
By default, it uses the :func: function.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py", + "ast_data": "FunctionDef name:initialize_inference_session arg:self arg:initializer arguments arg arg Call If Compare Call Call Assign Call Assign Call Call Assign Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_old_tf_stateless_truncated_normal", + "source_code": "def _old_tf_stateless_truncated_normal(shape, seed, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None, layout=None):\n with ops.name_scope(name, 'stateless_truncated_normal', [shape, seed, mean, stddev]) as name:\n seed = ops.convert_to_tensor(seed, dtype=dtypes.int32, name='seed')\n shape = shape_util.shape_tensor(shape)\n mean = ops.convert_to_tensor(mean, dtype=dtype, name='mean')\n stddev = ops.convert_to_tensor(stddev, dtype=dtype, name='stddev')\n rnd = api.call_with_layout(gen_stateless_random_ops.stateless_truncated_normal, layout, shape, seed, dtype)\n result = math_ops.add(rnd * stddev, mean, name=name)\n shape_util.maybe_set_static_shape(result, shape)\n return result", + "docstring": "DTensor stateless truncated normal implementation that takes an layout.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\d_random.py", + "ast_data": "FunctionDef name:_old_tf_stateless_truncated_normal arg:shape arg:seed arg:mean arg:stddev arg:dtype arg:name arg:layout arguments arg arg arg arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "is_frozen_param", + "source_code": "def is_frozen_param(t: torch.Tensor) -> bool:\n return getattr(t, '_is_frozen_param', False)", + "docstring": "Return True if the tensor is a frozen param.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\freezing_utils.py", + "ast_data": "FunctionDef name:is_frozen_param arg:t arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "generate_knots", + "source_code": "def generate_knots(x, y, *, w=None, xb=None, xe=None, k=3, s=0, nest=None):\n if s == 0:\n if nest is not None or w is not None:\n raise ValueError('s == 0 is interpolation only')\n t = _not_a_knot(x, k)\n yield t\n return\n x, y, w, k, s, xb, xe = _validate_inputs(x, y, w, k, s, xb, xe, parametric=np.ndim(y) == 2)\n yield from _generate_knots_impl(x, y, w=w, xb=xb, xe=xe, k=k, s=s, nest=nest)", + "docstring": "Generate knot vectors until the Least SQuares (LSQ) criterion is satified. Parameters ---------- x, y : array_like The data points defining the curve `nestx.size + k + 1`. .. 
versionadded:: 1.15.0", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_fitpack_repro.py", + "ast_data": "FunctionDef name:generate_knots arg:x arg:y arguments arg arg arg arg arg arg arg arg If Compare If BoolOp Compare Compare Raise Call Assign Call Return return:no Assign Call Compare Call Call" + }, + { + "library": "matplotlib", + "name": "ToolMinorGrid", + "source_code": "class ToolMinorGrid(ToolBase):\n description = 'Toggle major and minor grids'\n default_keymap = property(lambda self: mpl.rcParams['keymap.grid_minor'])\n\n def trigger(self, sender, event, data=None):\n sentinel = str(uuid.uuid4())\n with cbook._setattr_cm(event, key=sentinel), mpl.rc_context({'keymap.grid_minor': sentinel}):\n mpl.backend_bases.key_press_handler(event, self.figure.canvas)", + "docstring": "Tool to toggle the major and minor grids of the figure.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "ClassDef name:ToolMinorGrid Assign Assign Call arguments arg FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg Assign Call Call With Call Call Call" + }, + { + "library": "django", + "name": "create_model", + "source_code": "def create_model(self, model):\n sql, params = self.table_sql(model)\n self.execute(sql, params or None)\n if self.connection.features.supports_comments:\n if model._meta.db_table_comment:\n self.alter_db_table_comment(model, None, model._meta.db_table_comment)\n if not self.connection.features.supports_comments_inline:\n for field in model._meta.local_fields:\n if field.db_comment:\n field_db_params = field.db_parameters(connection=self.connection)\n field_type = field_db_params['type']\n self.execute(*self._alter_column_comment_sql(model, field, field_type, field.db_comment))\n self.deferred_sql.extend(self._model_indexes_sql(model))\n for field in model._meta.local_many_to_many:\n if field.remote_field.through._meta.auto_created:\n self.create_model(field.remote_field.through)", + "docstring": "Create a table and any accompanying indexes or unique constraints for the given .", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\schema.py", + "ast_data": "FunctionDef name:create_model arg:self arg:model arguments arg arg Assign Call Call BoolOp If If Call If For If Assign Call Assign Call Call Call Call For If Call" + }, + { + "library": "kornia", + "name": "_detach_tensor_to_cpu", + "source_code": "def _detach_tensor_to_cpu(self, output_image: Union[Tensor, list[Tensor], tuple[Tensor]]) -> Union[Tensor, list[Tensor], tuple[Tensor]]:\n if isinstance(output_image, (Tensor,)):\n return output_image.detach().cpu()\n if isinstance(output_image, (list, tuple)):\n return type(output_image)([self._detach_tensor_to_cpu(out) for out in output_image])\n raise RuntimeError(f'Unexpected object {output_image} with a type of `{type(output_image)}`')", + "docstring": "Detach the input tensor (or list/tuple of tensors) from the GPU and move it to the CPU. Args: output_image (Union[Tensor, list[Tensor], tuple[Tensor]]): The input tensor(s) to be moved. 
Returns: Union[Tensor, list[Tensor], tuple[Tensor]]: The tensor(s) moved to the CPU and detached from the computational graph.", + "type": "method", + "file_path": "kornia\\kornia\\core\\module.py", + "ast_data": "FunctionDef name:_detach_tensor_to_cpu arg:self arg:output_image arguments arg arg If Call Return return:yes Call Call If Call Return return:yes Call Call Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "_padded_split", + "source_code": "def _padded_split(tensor, pieces):\n shape = tensor.shape\n if 1 != len(shape):\n raise ValueError('input tensor must be 1D')\n tensor_len = shape.dims[0].value\n with ops.colocate_with(tensor):\n if tensor_len % pieces != 0:\n chunk_size = 1 + tensor_len // pieces\n if pieces > tensor_len:\n pad_len = pieces - tensor_len\n extended_whole = array_ops.concat([tensor, array_ops.zeros([pad_len], dtype=tensor.dtype)], 0)\n parts = array_ops.split(extended_whole, pieces)\n return (parts, pad_len)\n elif (pieces - 1) * chunk_size >= tensor_len:\n pad_len = pieces * chunk_size % tensor_len\n extended_whole = array_ops.concat([tensor, array_ops.zeros([pad_len], dtype=tensor.dtype)], 0)\n parts = array_ops.split(extended_whole, pieces)\n return (parts, pad_len)\n else:\n last_chunk_size = tensor_len - (pieces - 1) * chunk_size\n pad_len = chunk_size - last_chunk_size\n piece_lens = [chunk_size for _ in range(pieces - 1)] + [last_chunk_size]\n parts = array_ops.split(tensor, piece_lens)\n parts[-1] = array_ops.concat([parts[-1], array_ops.zeros([pad_len], dtype=tensor.dtype)], 0)\n return (parts, pad_len)\n else:\n return (array_ops.split(tensor, pieces), 0)", + "docstring": "Like split for 1D tensors but pads-out case where len % pieces != 0. Args: tensor: that must be 1D. pieces: a positive integer specifying the number of pieces into which tensor should be split. Returns: list of of length pieces, which hold the values of thin input tensor, in order. The final tensor may be zero-padded on the end to make its size equal to those of all of the other tensors. 
Raises: ValueError: The input tensor is not 1D.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", + "ast_data": "FunctionDef name:_padded_split arg:tensor arg:pieces arguments arg arg Assign If Compare Call Raise Call Assign With Call If Compare Assign If Compare Assign Assign Call Call Assign Call Return return:yes If Compare Assign Assign Call Call Assign Call Return return:yes Assign Assign Assign Call Assign Call Assign Call Call Return return:yes Return return:yes Call" + }, + { + "library": "kornia", + "name": "apply_transform_class", + "source_code": "def apply_transform_class(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n raise NotImplementedError", + "docstring": "Process class tags corresponding to the inputs that are transformed.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\base.py", + "ast_data": "FunctionDef name:apply_transform_class arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Raise" + }, + { + "library": "pytorch", + "name": "graph", + "source_code": "@graph.setter\ndef graph(self, g: Graph) -> None:\n assert isinstance(g, Graph), f'Expected a Graph instance, but got {type(g)}'\n self._graph = g\n g.owning_module = self\n self.recompile()", + "docstring": "Set the underlying ``", + "type": "method", + "file_path": "pytorch\\torch\\fx\\graph_module.py", + "ast_data": "FunctionDef name:graph arg:self arg:g arguments arg arg Call Call Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "log_cdf", + "source_code": "def log_cdf(self, value, name='log_cdf'):\n return self._call_log_cdf(value, name)", + "docstring": "Log cumulative distribution function. Given random variable , the cumulative distribution function is: Often, a numerical approximation can be used for that yields a more accurate answer than simply taking the logarithm of the when . Args: value: or . name: Python prepended to names of ops created by this function. Returns: logcdf: a of shape with values of type .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py", + "ast_data": "FunctionDef name:log_cdf arg:self arg:value arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "ToolTriggerEvent", + "source_code": "class ToolTriggerEvent(ToolEvent):\n\n def __init__(self, name, sender, tool, canvasevent=None, data=None):\n super().__init__(name, sender, tool, data)\n self.canvasevent = canvasevent", + "docstring": "Event to inform that a tool has been triggered.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py", + "ast_data": "ClassDef name:ToolTriggerEvent FunctionDef name:__init__ arg:self arg:name arg:sender arg:tool arg:canvasevent arg:data arguments arg arg arg arg arg arg Call Call Assign" + }, + { + "library": "tensorflow", + "name": "_find_dtype", + "source_code": "def _find_dtype(value, preferred):\n result = _find_dtype_helper(value, preferred)\n if result == dtypes.int64 or result == dtypes.int32 or result is None:\n return result\n raise ValueError('Illegal dtype: ' + str(result))", + "docstring": "Returns the preferred dtype of value or preferred if preferred != None. This is used as an operator to pass over multiple objects in decreasing order of priority until there is a preferred dtype for one. 
For example, if you were adding three tensor-ish things (some tensors, some lists), and needed a preferred dtype, you could use this as: def adding(a, b, c, dtype = None): dtype = _find_dtype(a, dtype) dtype = _find_dtype(b, dtype) dtype = _find_dtype(c, dtype) if dtype is None: dtype = tf.float32 ...Code continues here... Args: value: a list, value, RowPartition, or tensor. preferred: a given dtype. If not None, this will be returned. Returns: an optional dtype.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:_find_dtype arg:value arg:preferred arguments arg arg Assign Call If BoolOp Compare Compare Compare Return return:yes Raise Call Call" + }, + { + "library": "scikit-learn", + "name": "_update_feature_log_prob", + "source_code": "def _update_feature_log_prob(self, alpha):\n smoothed_fc = self.feature_count_ + alpha\n smoothed_cc = smoothed_fc.sum(axis=1)\n self.feature_log_prob_ = np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))", + "docstring": "Apply smoothing to raw counts and recompute log probabilities", + "type": "method", + "file_path": "scikit-learn\\sklearn\\naive_bayes.py", + "ast_data": "FunctionDef name:_update_feature_log_prob arg:self arg:alpha arguments arg arg Assign Assign Call Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "_enable_cp_dispatcher", + "source_code": "@contextlib.contextmanager\ndef _enable_cp_dispatcher() -> Generator[None, None, None]:\n old_handlers = DTensor._op_dispatcher._custom_op_handlers\n DTensor._op_dispatcher._custom_op_handlers = {**old_handlers, **customized_ops}\n yield\n DTensor._op_dispatcher._custom_op_handlers = old_handlers", + "docstring": "Enables DTensor dispatcher to dispatch SDPA to CP.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py", + "ast_data": "FunctionDef name:_enable_cp_dispatcher arguments Assign Assign Assign" + }, + { + "library": "numpy", + "name": "_multi_dot_matrix_chain_order", + "source_code": "def _multi_dot_matrix_chain_order(arrays, return_costs=False):\n n = len(arrays)\n p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]\n m = zeros((n, n), dtype=double)\n s = empty((n, n), dtype=intp)\n for l in range(1, n):\n for i in range(n - l):\n j = i + l\n m[i, j] = inf\n for k in range(i, j):\n q = m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1]\n if q < m[i, j]:\n m[i, j] = q\n s[i, j] = k\n return (s, m) if return_costs else s", + "docstring": "Return a np.array that encodes the optimal order of multiplications. The optimal order array is then used by to do the multiplication. Also return the cost matrix if is The implementation CLOSELY follows Cormen, \"Introduction to Algorithms\", Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices. cost[i, j] = min([ cost[prefix] + cost[suffix] + cost_mult(prefix, suffix) for k in range(i, j)])", + "type": "function", + "file_path": "numpy\\numpy\\linalg\\_linalg.py", + "ast_data": "FunctionDef name:_multi_dot_matrix_chain_order arg:arrays arg:return_costs arguments arg arg Assign Call Assign Assign Call Assign Call For Call For Call Assign Assign For Call Assign If Compare Assign Assign Return return:yes" + }, + { + "library": "scipy", + "name": "register_backend", + "source_code": "def register_backend(backend):\n backend = _backend_from_arg(backend)\n ua.register_backend(backend)", + "docstring": "Register a backend for permanent use. 
Registered backends have the lowest priority and will be tried after the global backend. Parameters ---------- backend : {object, 'scipy'} The backend to use. Can either be a `NotImplemented` >>> fft([1]) array([1.+0.j]) >>> set_global_backend(\"scipy\") # Restore global backend to default", + "type": "function", + "file_path": "scipy\\scipy\\fft\\_backend.py", + "ast_data": "FunctionDef name:register_backend arg:backend arguments arg Assign Call Call" + }, + { + "library": "scipy", + "name": "_bessel_zeros", + "source_code": "def _bessel_zeros(N):\n if N == 0:\n return asarray([])\n x0 = _campos_zeros(N)\n\n def f(x):\n return special.kve(N + 0.5, 1 / x)\n\n def fp(x):\n return special.kve(N - 0.5, 1 / x) / (2 * x ** 2) - special.kve(N + 0.5, 1 / x) / x ** 2 + special.kve(N + 1.5, 1 / x) / (2 * x ** 2)\n x = _aberth(f, fp, x0)\n for i in range(len(x)):\n x[i] = optimize.newton(f, x[i], fp, tol=1e-15)\n x = np.mean((x, x[::-1].conj()), 0)\n if abs(np.sum(x) + 1) > 1e-15:\n raise RuntimeError('Generated zeros are inaccurate')\n return x", + "docstring": "Find zeros of ordinary Bessel polynomial of order , by root-finding of modified Bessel function of the second kind", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_filter_design.py", + "ast_data": "FunctionDef name:_bessel_zeros arg:N arguments arg If Compare Return return:yes Call Assign Call FunctionDef name:f arg:x arguments arg Return return:yes Call FunctionDef name:fp arg:x arguments arg Return return:yes Call Call Call Assign Call For Call Call Assign Call Assign Call Call If Compare Call Call Raise Call Return return:yes" + }, + { + "library": "scipy", + "name": "todense", + "source_code": "def todense(self, order=None, out=None):\n return self._ascontainer(self.toarray(order=order, out=out))", + "docstring": "Return a dense representation of this sparse array. Parameters ---------- order : {'C', 'F'}, optional Whether to store multi-dimensional data in C (row-major) or Fortran (column-major) order in memory. The default is 'None', which provides no ordering guarantees. Cannot be specified in conjunction with the argument. out : ndarray, 2-D, optional If specified, uses this array as the output buffer instead of allocating a new array to return. The provided array must have the same shape and dtype as the sparse array on which you are calling the method. Returns ------- arr : ndarray, 2-D An array with the same shape and containing the same data represented by the sparse array, with the requested memory order. 
If was passed, the same object is returned after being modified in-place to contain the appropriate values.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_base.py", + "ast_data": "FunctionDef name:todense arg:self arg:order arg:out arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_PruneCSRMatrix", + "source_code": "def _PruneCSRMatrix(unpruned, pruned_pattern):\n _, dtype = sparse_csr_matrix_ops.dense_shape_and_type(pruned_pattern)\n coo_unpruned = sparse_tensor.SparseTensor(*sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(unpruned, type=dtype))\n coo_pruned_pattern = sparse_tensor.SparseTensor(*sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(pruned_pattern, type=dtype))\n return sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(*_PruneSparseTensor(coo_unpruned, coo_pruned_pattern))", + "docstring": "TODO(tabakg): Consider re-writing in C++.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_grad.py", + "ast_data": "FunctionDef name:_PruneCSRMatrix arg:unpruned arg:pruned_pattern arguments arg arg Assign Call Assign Call Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "get_metadata_routing", + "source_code": "def get_metadata_routing(self):\n router = MetadataRouter(owner=self.__class__.__name__).add(splitter=check_cv(self.cv), method_mapping=MethodMapping().add(caller='fit', callee='split'))\n return router", + "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.4 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_least_angle.py", + "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "compute_dof_from_df", + "source_code": "def compute_dof_from_df(self):\n J = CubicTriInterpolator._get_jacobian(self._tris_pts)\n tri_z = self.z[self._triangles]\n tri_dz = self.dz[self._triangles]\n tri_dof = self.get_dof_vec(tri_z, tri_dz, J)\n return tri_dof", + "docstring": "Compute reduced-HCT elements degrees of freedom, from the gradient.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py", + "ast_data": "FunctionDef name:compute_dof_from_df arg:self arguments arg Assign Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_arg_x_as_expected", + "source_code": "def _arg_x_as_expected(value):\n value = np.asarray(value, order='C', dtype=np.float64)\n if value.ndim != 1:\n raise ValueError('`x` must be a 1-D array')\n return value", + "docstring": "Ensure argument is a 1-D C-contiguous array of dtype('float64'). Used in , and to make compatible with the signature of the wrapped Cython functions. 
Returns ------- value : ndarray A 1-D C-contiguous array with dtype('float64').", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_peak_finding.py", + "ast_data": "FunctionDef name:_arg_x_as_expected arg:value arguments arg Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "significant_strides_equal", + "source_code": "def significant_strides_equal(strides1: Sequence[_IntLike], strides2: Sequence[_IntLike], shape: Sequence[_IntLike]) -> bool:\n assert len(shape) == len(strides1) and len(strides1) == len(strides2)\n for dim, s1, s2 in zip(shape, strides1, strides2):\n if V.graph.sizevars.statically_known_leq(dim, 1):\n continue\n if not V.graph.sizevars.statically_known_equals(s1, s2) and (not V.graph.sizevars.symbolic_hint(s1) == V.graph.sizevars.symbolic_hint(s2)):\n return False\n return True", + "docstring": "Returns true if the strides are equal, ignoring dimensions of size 1 .", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "FunctionDef name:significant_strides_equal arg:strides1 arg:strides2 arg:shape arguments arg arg arg BoolOp Compare Call Call Compare Call Call For Call If Call If BoolOp Call Compare Call Call Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "_from_sequence_not_strict", + "source_code": "@classmethod\ndef _from_sequence_not_strict(cls, data, *, dtype=None, copy: bool=False, freq=lib.no_default, unit=None) -> Self:\n if dtype:\n dtype = _validate_td64_dtype(dtype)\n assert unit not in ['Y', 'y', 'M']\n data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit)\n if dtype is not None:\n data = astype_overflowsafe(data, dtype=dtype, copy=False)\n result = cls._simple_new(data, dtype=data.dtype, freq=inferred_freq)\n result._maybe_pin_freq(freq, {})\n return result", + "docstring": "_from_sequence_not_strict but without responsibility for finding the result's .", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\timedeltas.py", + "ast_data": "FunctionDef name:_from_sequence_not_strict arg:cls arg:data arguments arg arg arg arg arg arg If Assign Call Compare Assign Call If Compare Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "from_list", + "source_code": "@staticmethod\ndef from_list(index, queues):\n if not queues or not isinstance(queues, list) or (not all((isinstance(x, QueueBase) for x in queues))):\n raise TypeError('A list of queues expected')\n dtypes = queues[0].dtypes\n if not all((dtypes == q.dtypes for q in queues[1:])):\n raise TypeError('Queues do not have matching component dtypes.')\n names = queues[0].names\n if not all((names == q.names for q in queues[1:])):\n raise TypeError('Queues do not have matching component names.')\n queue_shapes = [q.shapes for q in queues]\n reduced_shapes = [functools.reduce(_shape_common, s) for s in zip(*queue_shapes)]\n queue_refs = array_ops_stack.stack([x.queue_ref for x in queues])\n selected_queue = array_ops.gather(queue_refs, index)\n return QueueBase(dtypes=dtypes, shapes=reduced_shapes, names=names, queue_ref=selected_queue)", + "docstring": "Create a queue using the queue reference from . Args: index: An integer scalar tensor that determines the input that gets selected. queues: A list of objects. Returns: A object. 
Raises: TypeError: When is not a list of objects, or when the data types of are not all the same.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", + "ast_data": "FunctionDef name:from_list arg:index arg:queues arguments arg arg If BoolOp Call Call Call Raise Call Assign If Call Compare Raise Call Assign If Call Compare Raise Call Assign Assign Call Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_validate_ngram_range", + "source_code": "def _validate_ngram_range(self):\n min_n, max_m = self.ngram_range\n if min_n > max_m:\n raise ValueError('Invalid value for ngram_range=%s lower boundary larger than the upper boundary.' % str(self.ngram_range))", + "docstring": "Check validity of ngram_range parameter", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py", + "ast_data": "FunctionDef name:_validate_ngram_range arg:self arguments arg Assign If Compare Raise Call Call" + }, + { + "library": "pandas", + "name": "construct_from_string", + "source_code": "@classmethod\ndef construct_from_string(cls, string: str_type) -> IntervalDtype:\n if not isinstance(string, str):\n raise TypeError(f\"'construct_from_string' expects a string, got {type(string)}\")\n if string.lower() == 'interval' or cls._match.search(string) is not None:\n return cls(string)\n msg = f\"Cannot construct a 'IntervalDtype' from '{string}'.\\n\\nIncorrectly formatted string passed to constructor. Valid formats include Interval or Interval[dtype] where dtype is numeric, datetime, or timedelta\"\n raise TypeError(msg)", + "docstring": "attempt to construct this type from a string, raise a TypeError if its not possible", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", + "ast_data": "FunctionDef name:construct_from_string arg:cls arg:string arguments arg arg If Call Raise Call Call If BoolOp Compare Call Compare Call Return return:yes Call Assign Raise Call" + }, + { + "library": "tensorflow", + "name": "resize_volumes", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef resize_volumes(x, depth_factor, height_factor, width_factor, data_format):\n if data_format == 'channels_first':\n output = repeat_elements(x, depth_factor, axis=2)\n output = repeat_elements(output, height_factor, axis=3)\n output = repeat_elements(output, width_factor, axis=4)\n return output\n elif data_format == 'channels_last':\n output = repeat_elements(x, depth_factor, axis=1)\n output = repeat_elements(output, height_factor, axis=2)\n output = repeat_elements(output, width_factor, axis=3)\n return output\n else:\n raise ValueError('Invalid data_format: ' + str(data_format))", + "docstring": "Resizes the volume contained in a 5D tensor. Args: x: Tensor or variable to resize. depth_factor: Positive integer. height_factor: Positive integer. width_factor: Positive integer. data_format: One of , . Returns: A tensor. 
Raises: ValueError: if is neither or .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:resize_volumes arg:x arg:depth_factor arg:height_factor arg:width_factor arg:data_format arguments arg arg arg arg arg If Compare Assign Call Assign Call Assign Call Return return:yes If Compare Assign Call Assign Call Assign Call Return return:yes Raise Call Call" + }, + { + "library": "scipy", + "name": "iter_variants", + "source_code": "def iter_variants(inputs, outputs):\n maps = [('i', 'l')]\n if not ('i' in inputs or 'l' in inputs or 'q' in inputs or ('p' in inputs)):\n maps = maps + [(a + 'dD', b + 'fF') for a, b in maps]\n for src, dst in maps:\n new_inputs = inputs\n new_outputs = outputs\n for a, b in zip(src, dst):\n new_inputs = new_inputs.replace(a, b)\n new_outputs = new_outputs.replace(a, b)\n yield (new_inputs, new_outputs)", + "docstring": "Generate variants of UFunc signatures, by changing variable types, within the limitation that the corresponding C types casts still work out. This does not generate all possibilities, just the ones required for the ufunc to work properly with the most common data types. Parameters ---------- inputs, outputs : str UFunc input and output signature strings Yields ------ new_input, new_output : str Modified input and output strings. Also the original input/output pair is yielded.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_generate_pyx.py", + "ast_data": "FunctionDef name:iter_variants arg:inputs arg:outputs arguments arg arg Assign If BoolOp Compare Compare Compare Compare Assign For Assign Assign For Call Assign Call Assign Call" + }, + { + "library": "numpy", + "name": "find", + "source_code": "def find(self, sub, start=0, end=None):\n return find(self, sub, start, end)", + "docstring": "For each element, return the lowest index in the string where substring is found. See Also -------- char.find", + "type": "method", + "file_path": "numpy\\numpy\\_core\\defchararray.py", + "ast_data": "FunctionDef name:find arg:self arg:sub arg:start arg:end arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "create_pseudo_output_names", + "source_code": "def create_pseudo_output_names(outputs):\n return _create_pseudo_names(outputs, prefix='output_')", + "docstring": "Create pseudo output names for a subclassed Model.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py", + "ast_data": "FunctionDef name:create_pseudo_output_names arg:outputs arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "inverse_keypoints", + "source_code": "def inverse_keypoints(self, input: Keypoints, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None, **kwargs: Any) -> Keypoints:\n output = input.clone()\n batch_prob = params['batch_prob']\n to_apply = batch_prob > 0.5\n if transform is None:\n raise RuntimeError('`transform` has to be a tensor. Got None.')\n params, flags = self._process_kwargs_to_params_and_flags(self._params if params is None else params, flags, **kwargs)\n if not to_apply.any():\n output = input\n elif to_apply.all():\n output = input.transform_keypoints_(transform)\n else:\n output[to_apply] = input[to_apply].transform_keypoints_(transform[to_apply])\n return output", + "docstring": "Inverse the transformation on keypoints. Args: input: input keypoints tensor or object. params: the corresponding parameters for an operation. 
flags: static parameters. transform: the inverse transformation matrix kwargs: additional arguments", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py", + "ast_data": "FunctionDef name:inverse_keypoints arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg arg Assign Call Assign Assign Compare If Compare Raise Call Assign Call Compare If Call Assign If Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "cdf", + "source_code": "def cdf(self, k, *args, **kwds):\n args, loc, _ = self._parse_args(*args, **kwds)\n k, loc = map(asarray, (k, loc))\n args = tuple(map(asarray, args))\n _a, _b = self._get_support(*args)\n k = asarray(k - loc)\n cond0 = self._argcheck(*args)\n cond1 = (k >= _a) & (k < _b)\n cond2 = k >= _b\n cond3 = np.isneginf(k)\n cond = cond0 & cond1 & np.isfinite(k)\n output = zeros(shape(cond), 'd')\n place(output, cond2 * (cond0 == cond0), 1.0)\n place(output, cond3 * (cond0 == cond0), 0.0)\n place(output, 1 - cond0 + np.isnan(k), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *(k,) + args)\n place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))\n if output.ndim == 0:\n return output[()]\n return output", + "docstring": "Cumulative distribution function of the given RV. Parameters ---------- k : array_like, int Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- cdf : ndarray Cumulative distribution function evaluated at .", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:cdf arg:self arg:k arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Compare Compare Assign Compare Assign Call Assign Call Assign Call Call Call Compare Call Compare Call Call If Call Assign Call Call Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_validate_preemption_failure", + "source_code": "def _validate_preemption_failure(self, e):\n if _is_worker_failure(e) and (not self._cluster.closure_queue._cancellation_mgr.is_cancelled):\n metric_utils.monitor_increment_counter('worker_failures')\n return\n raise e", + "docstring": "Validates that the given exception represents worker preemption.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:_validate_preemption_failure arg:self arg:e arguments arg arg If BoolOp Call Call Return return:no Raise" + }, + { + "library": "pandas", + "name": "stringify_path", + "source_code": "def stringify_path(filepath_or_buffer: FilePath | BaseBufferT, convert_file_like: bool=False) -> str | BaseBufferT:\n if not convert_file_like and is_file_like(filepath_or_buffer):\n return cast(BaseBufferT, filepath_or_buffer)\n if isinstance(filepath_or_buffer, os.PathLike):\n filepath_or_buffer = filepath_or_buffer.__fspath__()\n return _expand_user(filepath_or_buffer)", + "docstring": "Attempt to convert a path-like object to a string. Parameters ---------- filepath_or_buffer : object to be converted Returns ------- str_filepath_or_buffer : maybe a string version of the object Notes ----- Objects supporting the fspath protocol are coerced according to its __fspath__ method. 
Any other object is passed through unchanged, which includes bytes, strings, buffers, or anything else that's not even path-like.", + "type": "function", + "file_path": "pandas\\pandas\\io\\common.py", + "ast_data": "FunctionDef name:stringify_path arg:filepath_or_buffer arg:convert_file_like arguments arg arg If BoolOp Call Return return:yes Call If Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "SVSummaryThread", + "source_code": "class SVSummaryThread(coordinator.LooperThread):\n\n def __init__(self, sv, sess):\n super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)\n self._sv = sv\n self._sess = sess\n\n def run_loop(self):\n if self._sv.global_step is not None:\n summary_strs, global_step = self._sess.run([self._sv.summary_op, self._sv.global_step])\n else:\n summary_strs = self._sess.run(self._sv.summary_op)\n global_step = None\n if self._sv.summary_writer:\n logging.info('Recording summary at step %s.', global_step)\n self._sv.summary_writer.add_summary(summary_strs, global_step)", + "docstring": "A thread to save summaries on a timer.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", + "ast_data": "ClassDef name:SVSummaryThread FunctionDef name:__init__ arg:self arg:sv arg:sess arguments arg arg arg Call Call Assign Assign FunctionDef name:run_loop arg:self arguments arg If Compare Assign Call Assign Call Assign If Call Call" + }, + { + "library": "matplotlib", + "name": "set_units", + "source_code": "def set_units(self, u):\n if u == self.units:\n return\n for axis in self._get_shared_axis():\n axis.units = u\n axis._update_axisinfo()\n axis.callbacks.process('units')\n axis.stale = True", + "docstring": "Set the units for axis. Parameters ---------- u : units tag Notes ----- The units of any shared axis will also be updated.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:set_units arg:self arg:u arguments arg arg If Compare Return return:no For Call Assign Call Call Assign" + }, + { + "library": "tensorflow", + "name": "release", + "source_code": "def release(self, group_id):\n self._validate_group_id(group_id)\n self._ready.acquire()\n self._group_member_counts[group_id] -= 1\n if self._group_member_counts[group_id] == 0:\n self._ready.notify_all()\n self._ready.release()", + "docstring": "Release the group lock for a specific group .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\util\\lock_util.py", + "ast_data": "FunctionDef name:release arg:self arg:group_id arguments arg arg Call Call If Compare Call Call" + }, + { + "library": "tensorflow", + "name": "prevent_feeding", + "source_code": "def prevent_feeding(self, tensor) -> None:\n self._unfeedable_tensors.add(tensor)", + "docstring": "Marks the given as unfeedable in this graph.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:prevent_feeding arg:self arg:tensor arguments arg arg Call" + }, + { + "library": "tensorflow", + "name": "_get_num_samples_or_steps", + "source_code": "def _get_num_samples_or_steps(ins, batch_size, steps_per_epoch):\n if steps_per_epoch:\n return steps_per_epoch\n return training_utils_v1.check_num_samples(ins, batch_size, steps_per_epoch, 'steps_per_epoch')", + "docstring": "Returns total number of samples (when training in batch mode) or steps.", + "type": "function", + "file_path": 
"tensorflow\\tensorflow\\python\\keras\\engine\\training_arrays_v1.py", + "ast_data": "FunctionDef name:_get_num_samples_or_steps arg:ins arg:batch_size arg:steps_per_epoch arguments arg arg arg If Return return:yes Return return:yes Call" + }, + { + "library": "pandas", + "name": "waitForNewPaste", + "source_code": "def waitForNewPaste(timeout=None):\n startTime = time.time()\n originalText = paste()\n while True:\n currentText = paste()\n if currentText != originalText:\n return currentText\n time.sleep(0.01)\n if timeout is not None and time.time() > startTime + timeout:\n raise PyperclipTimeoutException('waitForNewPaste() timed out after ' + str(timeout) + ' seconds.')", + "docstring": "This function call blocks until a new text string exists on the clipboard that is different from the text that was there when the function was first called. It returns this text. This function raises PyperclipTimeoutException if timeout was set to a number of seconds that has elapsed without non-empty text being put on the clipboard.", + "type": "function", + "file_path": "pandas\\pandas\\io\\clipboard\\__init__.py", + "ast_data": "FunctionDef name:waitForNewPaste arg:timeout arguments arg Assign Call Assign Call While Assign Call If Compare Return return:yes Call If BoolOp Compare Compare Call Raise Call Call" + }, + { + "library": "scipy", + "name": "InterpolatedUnivariateSpline", + "source_code": "class InterpolatedUnivariateSpline(UnivariateSpline):\n\n def __init__(self, x, y, w=None, bbox=[None] * 2, k=3, ext=0, check_finite=False):\n x, y, w, bbox, self.ext = self.validate_input(x, y, w, bbox, k, None, ext, check_finite)\n if not np.all(diff(x) > 0.0):\n raise ValueError('x must be strictly increasing')\n with FITPACK_LOCK:\n self._data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0], xe=bbox[1], s=0)\n self._reset_class()", + "docstring": "1-D interpolating spline for a given set of data points. .. legacy:: class Specifically, we recommend using instead. Fits a spline y = spl(x) of degree to the provided , data. Spline function passes through all provided points. Equivalent to with = 0. Parameters ---------- x : (N,) array_like Input dimension of data points -- must be strictly increasing y : (N,) array_like input dimension of data points w : (N,) array_like, optional Weights for spline fitting. Must be positive. If None (default), weights are all 1. bbox : (2,) array_like, optional 2-sequence specifying the boundary of the approximation interval. If None (default), `y`: >>> spl.get_residual() 0.0", + "type": "class", + "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py", + "ast_data": "ClassDef name:InterpolatedUnivariateSpline FunctionDef name:__init__ arg:self arg:x arg:y arg:w arg:bbox arg:k arg:ext arg:check_finite arguments arg arg arg arg arg arg arg arg Assign Call If Call Compare Call Raise Call With Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_object_identifier", + "source_code": "@property\ndef _object_identifier(self):\n return '_generic_user_object'", + "docstring": "String used to identify this object in a SavedModel. THIS FIELD HAS BEEN DEPRECATED IN FAVOR OF THE NAME REGISTERED WITH . Generally, the object identifier is constant across objects of the same class, while the metadata field is used for instance-specific data. 
Returns: String object identifier.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py", + "ast_data": "FunctionDef name:_object_identifier arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "output_plannable", + "source_code": "@property\ndef output_plannable(self) -> bool:\n return self._output_plannable", + "docstring": "Are all possible choices TritonTemplates or Extern Kernels with out variants", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "FunctionDef name:output_plannable arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_raw_device_count_nvml", + "source_code": "def _raw_device_count_nvml() -> int:\n from ctypes import byref, c_int, CDLL\n nvml_h = CDLL('libnvidia-ml.so.1')\n rc = nvml_h.nvmlInit()\n if rc != 0:\n warnings.warn(\"Can't initialize NVML\")\n return -1\n dev_count = c_int(-1)\n rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count))\n if rc != 0:\n warnings.warn(\"Can't get nvml device count\")\n return -1\n del nvml_h\n return dev_count.value", + "docstring": "Return number of devices as reported by NVML or negative value if NVML discovery/initialization failed.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:_raw_device_count_nvml arguments Assign Call Assign Call If Compare Call Return return:yes Assign Call Assign Call Call If Compare Call Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "_compute_dplus", + "source_code": "def _compute_dplus(cdfvals, x):\n n = len(cdfvals)\n dplus = np.arange(1.0, n + 1) / n - cdfvals\n amax = dplus.argmax()\n loc_max = x[amax]\n return (dplus[amax], loc_max)", + "docstring": "Computes D+ as used in the Kolmogorov-Smirnov test. Parameters ---------- cdfvals : array_like Sorted array of CDF values between 0 and 1 x: array_like Sorted array of the stochastic variable itself Returns ------- res: Pair with the following elements: - The maximum distance of the CDF values below Uniform(0, 1). 
- The location at which the maximum is reached.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:_compute_dplus arg:cdfvals arg:x arguments arg arg Assign Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_export", + "source_code": "def _export(name: str):\n\n def wrapper(func):\n globals()[name] = func\n __all__.append(name)\n return func\n return wrapper", + "docstring": "Exports the function in the current global namespace.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py", + "ast_data": "FunctionDef name:_export arg:name arguments arg FunctionDef name:wrapper arg:func arguments arg Assign Call Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_broadcast_batch_dims", + "source_code": "def _broadcast_batch_dims(self, x, spectrum):\n spectrum = tensor_conversion.convert_to_tensor_v2_with_dispatch(spectrum, name='spectrum')\n batch_shape = self._batch_shape_tensor(shape=self._shape_tensor(spectrum=spectrum))\n spec_mat = array_ops.reshape(spectrum, array_ops.concat((batch_shape, [-1, 1]), axis=0))\n x, spec_mat = linear_operator_util.broadcast_matrix_batch_dims((x, spec_mat))\n x_batch_shape = array_ops.shape(x)[:-2]\n spectrum_shape = array_ops.shape(spectrum)\n spectrum = array_ops.reshape(spec_mat, array_ops.concat((x_batch_shape, self._block_shape_tensor(spectrum_shape=spectrum_shape)), axis=0))\n return (x, spectrum)", + "docstring": "Broadcast batch dims of batch matrix and spectrum.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_circulant.py", + "ast_data": "FunctionDef name:_broadcast_batch_dims arg:self arg:x arg:spectrum arguments arg arg arg Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_check_subgraph_closed", + "source_code": "def _check_subgraph_closed(n, reachable_by_input, input_nodes_set, name_to_input_name):\n next_to_visit = [n]\n visited = set()\n while next_to_visit:\n current_node = next_to_visit.pop()\n visited.add(current_node)\n if current_node in reachable_by_input and current_node not in input_nodes_set:\n raise TypeError('Node %s uses input %s not in input_nodes.' % (n, current_node))\n if current_node not in input_nodes_set:\n next_to_visit += [input_node for input_node in name_to_input_name[current_node] if input_node not in visited]", + "docstring": "Checks to make sure node only connects to predecessor graph through inputs. Args: n: Node to check reachable_by_input: Nodes that are reachable by all inputs of subgraph input_nodes_set: The set of nodes that are \"inputs\". name_to_input_name: Maps from name to the list of inputs. 
Raises: TypeError: If the given node uses items past inputs directly.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py", + "ast_data": "FunctionDef name:_check_subgraph_closed arg:n arg:reachable_by_input arg:input_nodes_set arg:name_to_input_name arguments arg arg arg arg Assign Assign Call While Assign Call Call If BoolOp Compare Compare Raise Call If Compare Compare" + }, + { + "library": "pytorch", + "name": "Softmax", + "source_code": "class Softmax(torch.nn.Softmax):\n\n def __init__(self, dim=None, scale=1.0, zero_point=0):\n super().__init__()\n self.dim = dim\n self.scale = scale\n self.zero_point = zero_point\n\n def forward(self, input):\n dim = self.dim\n if dim is None:\n stacklevel = 3\n dim = torch.nn.functional._get_softmax_dim('softmax', input.dim(), stacklevel)\n return torch.ops.quantized.softmax(input, dim, self.scale, self.zero_point)\n\n def _get_name(self):\n return 'QuantizedSoftmax'\n\n @staticmethod\n def from_float(mod, use_precomputed_fake_quant=False):\n scale, zero_point = mod.activation_post_process.calculate_qparams()\n return Softmax(mod.dim, float(scale), int(zero_point))\n\n @classmethod\n def from_reference(cls, mod, scale, zero_point):\n return cls(mod.dim, float(scale), int(zero_point))", + "docstring": "This is the quantized version of :class:. Args: dim: A dimension along which Softmax will be computed (so every slice along dim will sum to 1). scale: quantization scale of the output tensor zero_point: quantization zero point of the output tensor", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\activation.py", + "ast_data": "ClassDef name:Softmax FunctionDef name:__init__ arg:self arg:dim arg:scale arg:zero_point arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Assign If Compare Assign Assign Call Call Return return:yes Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:mod arg:use_precomputed_fake_quant arguments arg arg Assign Call Return return:yes Call Call Call FunctionDef name:from_reference arg:cls arg:mod arg:scale arg:zero_point arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "_get_shared_axis", + "source_code": "def _get_shared_axis(self):\n name = self._get_axis_name()\n return [ax._axis_map[name] for ax in self._get_shared_axes()]", + "docstring": "Return list of shared axis for current axis.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:_get_shared_axis arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_pdf", + "source_code": "def _pdf(self, x, beta, m):\n N = 1.0 / (m / beta / (m - 1) * np.exp(-beta ** 2 / 2.0) + _norm_pdf_C * _norm_cdf(beta))\n\n def rhs(x, beta, m):\n return np.exp(-x ** 2 / 2)\n\n def lhs(x, beta, m):\n return (m / beta) ** m * np.exp(-beta ** 2 / 2.0) * (m / beta - beta - x) ** (-m)\n return N * xpx.apply_where(x > -beta, (x, beta, m), rhs, lhs)", + "docstring": "Return PDF of the crystalball function. 
-- | exp(-x**2 / 2), for x > -beta crystalball.pdf(x, beta, m) = N * | | A * (B - x)**(-m), for x <= -beta --", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", + "ast_data": "FunctionDef name:_pdf arg:self arg:x arg:beta arg:m arguments arg arg arg arg Assign Call Call FunctionDef name:rhs arg:x arg:beta arg:m arguments arg arg arg Return return:yes Call FunctionDef name:lhs arg:x arg:beta arg:m arguments arg arg arg Return return:yes Call Return return:yes Call Compare" + }, + { + "library": "scipy", + "name": "mode", + "source_code": "def mode(a, axis=0):\n return _mode(a, axis=axis, keepdims=True)", + "docstring": "Returns an array of the modal (most common) value in the passed array. Parameters ---------- a : array_like n-dimensional array of which to find mode(s). axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array . Returns ------- mode : ndarray Array of modal values. count : ndarray Array of counts for each mode. Notes ----- For more details, see . Examples -------- >>> import numpy as np >>> from scipy import stats >>> from scipy.stats import mstats >>> m_arr = np.ma.array([1, 1, 0, 0, 0, 0], mask=[0, 0, 1, 1, 1, 0]) >>> mstats.mode(m_arr) # note that most zeros are masked ModeResult(mode=array([1.]), count=array([2.]))", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", + "ast_data": "FunctionDef name:mode arg:a arg:axis arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "diagonal", + "source_code": "@register_decomposition(aten.diagonal)\ndef diagonal(self: TensorLikeType, offset: int=0, dim1: int=0, dim2: int=1) -> TensorLikeType:\n num_dims = self.dim()\n dim1 = utils.canonicalize_dim(idx=dim1, rank=num_dims)\n dim2 = utils.canonicalize_dim(idx=dim2, rank=num_dims)\n torch._check(dim1 != dim2, lambda: f'diagonal dimensions cannot be identical {dim1}, {dim2}')\n storage_offset = self.storage_offset()\n if offset >= 0:\n diag_size = max(min(self.size()[dim1], self.size()[dim2] - offset), 0)\n else:\n diag_size = max(min(self.size()[dim1] + offset, self.size()[dim2]), 0)\n if diag_size > 0:\n if offset >= 0:\n storage_offset += offset * self.stride()[dim2]\n else:\n storage_offset -= offset * self.stride()[dim1]\n sizes = [s for i, s in enumerate(self.size()) if i not in (dim1, dim2)]\n sizes.append(diag_size)\n strides = [s for i, s in enumerate(self.stride()) if i not in (dim1, dim2)]\n strides.append(self.stride()[dim1] + self.stride()[dim2])\n result = self.as_strided(size=sizes, stride=strides, storage_offset=storage_offset)\n return result", + "docstring": "Reference implementation of torch.diagonal", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\__init__.py", + "ast_data": "FunctionDef name:diagonal arg:self arg:offset arg:dim1 arg:dim2 arguments arg arg arg arg Assign Call Assign Call Assign Call Call Compare arguments Assign Call If Compare Assign Call Call Call Call Assign Call Call Call Call If Compare If Compare Call Call Assign Call Call Compare Call Assign Call Call Compare Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "should_skip_detecting_model", + "source_code": "def should_skip_detecting_model(migration, model):\n return model._meta.proxy or not model._meta.managed or (not router.allow_migrate(self.connection.alias, migration.app_label, model_name=model._meta.model_name))", + "docstring": "No need to detect tables for proxy models, unmanaged models, 
or models that can't be migrated on the current database.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\executor.py", + "ast_data": "FunctionDef name:should_skip_detecting_model arg:migration arg:model arguments arg arg Return return:yes BoolOp Call" + }, + { + "library": "tensorflow", + "name": "PreemptionSaveContext", + "source_code": "class PreemptionSaveContext(threading.local):\n\n def __init__(self):\n super().__init__()\n self._in_preemption_save_context = False\n\n def enter_preemption_save_context(self):\n self._in_preemption_save_context = True\n\n def exit_preemption_save_context(self):\n self._in_preemption_save_context = False\n\n def in_preemption_save_context(self):\n return self._in_preemption_save_context", + "docstring": "A context for saving checkpoint upon preemption.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_context.py", + "ast_data": "ClassDef name:PreemptionSaveContext FunctionDef name:__init__ arg:self arguments arg Call Call Assign FunctionDef name:enter_preemption_save_context arg:self arguments arg Assign FunctionDef name:exit_preemption_save_context arg:self arguments arg Assign FunctionDef name:in_preemption_save_context arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "add_fields", + "source_code": "def add_fields(self, form, index):\n initial_form_count = self.initial_form_count()\n if self.can_order:\n if index is not None and index < initial_form_count:\n form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), initial=index + 1, required=False, widget=self.get_ordering_widget())\n else:\n form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), required=False, widget=self.get_ordering_widget())\n if self.can_delete and (self.can_delete_extra or (index is not None and index < initial_form_count)):\n form.fields[DELETION_FIELD_NAME] = BooleanField(label=_('Delete'), required=False, widget=self.get_deletion_widget())", + "docstring": "A hook for adding extra fields on to each form instance.", + "type": "method", + "file_path": "django\\django\\forms\\formsets.py", + "ast_data": "FunctionDef name:add_fields arg:self arg:form arg:index arguments arg arg arg Assign Call If If BoolOp Compare Compare Assign Call Call Call Assign Call Call Call If BoolOp BoolOp BoolOp Compare Compare Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "_mean_carrier_measure", + "source_code": "@property\ndef _mean_carrier_measure(self) -> float:\n raise NotImplementedError", + "docstring": "Abstract method for expected carrier measure, which is required for computing entropy.", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\exp_family.py", + "ast_data": "FunctionDef name:_mean_carrier_measure arg:self arguments arg Raise" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "@deprecation.deprecated(None, 'The `SyncReplicaOptimizer` class is deprecated. 
For synchronous training, please use [Distribution Strategies](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute).', warn_once=True)\ndef __init__(self, opt, replicas_to_aggregate, total_num_replicas=None, variable_averages=None, variables_to_average=None, use_locking=False, name='sync_replicas'):\n if total_num_replicas is None:\n total_num_replicas = replicas_to_aggregate\n super(SyncReplicasOptimizer, self).__init__(use_locking, name)\n logging.info('SyncReplicasV2: replicas_to_aggregate=%s; total_num_replicas=%s', replicas_to_aggregate, total_num_replicas)\n self._opt = opt\n self._replicas_to_aggregate = replicas_to_aggregate\n self._gradients_applied = False\n self._variable_averages = variable_averages\n self._variables_to_average = variables_to_average\n self._total_num_replicas = total_num_replicas\n self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate)\n self._global_step = None\n self._sync_token_queue = None\n self._chief_queue_runner = None\n self._accumulator_list = []", + "docstring": "Construct a sync_replicas optimizer. Args: opt: The actual optimizer that will be used to compute and apply the gradients. Must be one of the Optimizer classes. replicas_to_aggregate: number of replicas to aggregate for each variable update. total_num_replicas: Total number of tasks/workers/replicas, could be different from replicas_to_aggregate. If total_num_replicas > replicas_to_aggregate: it is backup_replicas + replicas_to_aggregate. If total_num_replicas < replicas_to_aggregate: Replicas compute multiple batches per update to variables. variable_averages: Optional object, used to maintain moving averages for the variables passed in . variables_to_average: a list of variables that need to be averaged. Only needed if variable_averages is passed in. use_locking: If True use locks for update operation. name: string. Optional name of the returned operation.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\sync_replicas_optimizer.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:opt arg:replicas_to_aggregate arg:total_num_replicas arg:variable_averages arg:variables_to_average arg:use_locking arg:name arguments arg arg arg arg arg arg arg arg If Compare Assign Call Call Call Assign Assign Assign Assign Assign Assign Assign Call Assign Assign Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "convert_to_tensor_or_composite", + "source_code": "def convert_to_tensor_or_composite(value, dtype=None, name=None) -> Union[EagerTensor, SymbolicTensor, composite_tensor.CompositeTensor]:\n return internal_convert_to_tensor_or_composite(value=value, dtype=dtype, name=name, as_ref=False)", + "docstring": "Converts the given object to a or . If is a it is returned unmodified. Otherwise, it is converted to a using . Args: value: A or an object that can be consumed by . dtype: (Optional.) The required of the returned or . name: (Optional.) A name to use if a new is created. Returns: A or , based on . 
Raises: ValueError: If does not match the element type of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:convert_to_tensor_or_composite arg:value arg:dtype arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "PipeliningShapeError", + "source_code": "class PipeliningShapeError(RuntimeError):\n pass", + "docstring": "Shape mismatch between configured and runtime values.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\pipelining\\_utils.py", + "ast_data": "ClassDef name:PipeliningShapeError" + }, + { + "library": "tensorflow", + "name": "transform_feature", + "source_code": "def transform_feature(self, transformation_cache, state_manager):\n return transformation_cache.get(self.categorical_column, state_manager)", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "copy_scoped_meta_graph", + "source_code": "def copy_scoped_meta_graph(from_scope, to_scope, from_graph=None, to_graph=None):\n from_graph = from_graph or ops.get_default_graph()\n to_graph = to_graph or ops.get_default_graph()\n if from_graph == to_graph and from_scope == to_scope:\n raise ValueError(f\"'from_scope' and 'to_scope' need to be different when performing copy in the same graph. Received: 'from_graph': {from_graph}, 'to_graph': {to_graph}, 'from_scope': {from_scope}, 'to_scope': {to_scope}.\")\n orig_meta_graph, var_list = export_scoped_meta_graph(export_scope=from_scope, graph=from_graph)\n var_list = import_scoped_meta_graph(orig_meta_graph, graph=to_graph, import_scope=to_scope)\n return var_list", + "docstring": "Copies a sub-meta_graph from one scope to another. Args: from_scope: name scope containing the subgraph to be copied. to_scope: name scope under which the copied subgraph will reside. from_graph: Optional from which to copy the subgraph. If , the default graph is use. to_graph: Optional to which to copy the subgraph. If , the default graph is used. Returns: A dictionary of that has been copied into . Raises: ValueError: If and are the same while and are also the same.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\meta_graph.py", + "ast_data": "FunctionDef name:copy_scoped_meta_graph arg:from_scope arg:to_scope arg:from_graph arg:to_graph arguments arg arg arg arg Assign BoolOp Call Assign BoolOp Call If BoolOp Compare Compare Raise Call Assign Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "_hist_bin_rice", + "source_code": "def _hist_bin_rice(x, range):\n del range\n return _ptp(x) / (2.0 * x.size ** (1.0 / 3))", + "docstring": "Rice histogram bin estimator. Another simple estimator with no normality assumption. It has better performance for large data than Sturges, but tends to overestimate the number of bins. The number of bins is proportional to the cube root of data size (asymptotically optimal). The estimate depends only on size of the data. Parameters ---------- x : array_like Input data that is to be histogrammed, trimmed to range. May not be empty. 
Returns ------- h : An estimate of the optimal bin width for the given data.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_histograms_impl.py", + "ast_data": "FunctionDef name:_hist_bin_rice arg:x arg:range arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_tmp_dir_for_key", + "source_code": "@classmethod\ndef _get_tmp_dir_for_key(cls: type[FxGraphCache], key: str) -> str:\n return os.path.join(FxGraphCache._get_tmp_dir(), key[1:3], key)", + "docstring": "Return the disk location for a given cache key.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codecache.py", + "ast_data": "FunctionDef name:_get_tmp_dir_for_key arg:cls arg:key arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "has_default_writer", + "source_code": "def has_default_writer():\n return _summary_state.writer is not None", + "docstring": "Returns a boolean indicating whether a default summary writer exists.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:has_default_writer arguments Return return:yes Compare" + }, + { + "library": "django", + "name": "escape", + "source_code": "@keep_lazy(SafeString)\ndef escape(text):\n return SafeString(html.escape(str(text)))", + "docstring": "Return the given text with ampersands, quotes and angle brackets encoded for use in HTML. Always escape input, even if it's already escaped and marked as such. This may result in double-escaping. If this is a concern, use conditional_escape() instead.", + "type": "function", + "file_path": "django\\django\\utils\\html.py", + "ast_data": "FunctionDef name:escape arg:text arguments arg Return return:yes Call Call Call Call" + }, + { + "library": "pandas", + "name": "to_hdf", + "source_code": "def to_hdf(path_or_buf: FilePath | HDFStore, key: str, value: DataFrame | Series, mode: str='a', complevel: int | None=None, complib: str | None=None, append: bool=False, format: str | None=None, index: bool=True, min_itemsize: int | dict[str, int] | None=None, nan_rep=None, dropna: bool | None=None, data_columns: Literal[True] | list[str] | None=None, errors: str='strict', encoding: str='UTF-8') -> None:\n if append:\n f = lambda store: store.append(key, value, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding)\n else:\n f = lambda store: store.put(key, value, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, data_columns=data_columns, errors=errors, encoding=encoding, dropna=dropna)\n if isinstance(path_or_buf, HDFStore):\n f(path_or_buf)\n else:\n path_or_buf = stringify_path(path_or_buf)\n with HDFStore(path_or_buf, mode=mode, complevel=complevel, complib=complib) as store:\n f(store)", + "docstring": "store this object, close it if we opened it", + "type": "function", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:to_hdf arg:path_or_buf arg:key arg:value arg:mode arg:complevel arg:complib arg:append arg:format arg:index arg:min_itemsize arg:nan_rep arg:dropna arg:data_columns arg:errors arg:encoding arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg If Assign arguments arg Call Assign arguments arg Call If Call Call Assign Call With Call Call" + }, + { + "library": "pytorch", + "name": "remove_weight_norm", + "source_code": "def remove_weight_norm(module: T_module, name: str='weight') -> 
T_module:\n for k, hook in module._forward_pre_hooks.items():\n if isinstance(hook, WeightNorm) and hook.name == name:\n hook.remove(module)\n del module._forward_pre_hooks[k]\n return module\n raise ValueError(f\"weight_norm of '{name}' not found in {module}\")", + "docstring": "Remove the weight normalization reparameterization from a module. Args: module (Module): containing module name (str, optional): name of weight parameter Example: >>> m = weight_norm(nn.Linear(20, 40)) >>> remove_weight_norm(m)", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\weight_norm.py", + "ast_data": "FunctionDef name:remove_weight_norm arg:module arg:name arguments arg arg For Call If BoolOp Call Compare Call Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "CUTLASSArgs", + "source_code": "@dataclass\nclass CUTLASSArgs:\n architectures: Optional[str] = None\n cuda_version: Optional[str] = None\n instantiation_level: Optional[str] = None\n operations: Optional[str] = None\n build_dir = ''\n curr_build_dir = ''\n generator_target = ''\n kernels = 'all'\n ignore_kernels = ''\n exclude_kernels = ''\n kernel_filter_file: None = None\n selected_kernel_list: None = None\n interface_dir: None = None\n filter_by_cc = True\n disable_full_archs_compilation = False\n\n def __post_init__(self):\n if self.architectures is None or self.cuda_version is None:\n raise RuntimeError(f'self.architectures={self.architectures!r} or self.cuda_version={self.cuda_version!r} is None!')\n self.architectures = _normalize_cuda_arch(self.architectures)", + "docstring": "CUTLASS args used to initialize a CUTLASS Manifest.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cutlass_utils.py", + "ast_data": "ClassDef name:CUTLASSArgs Assign Assign Assign Assign Assign Assign Assign Assign FunctionDef name:__post_init__ arg:self arguments arg If BoolOp Compare Compare Raise Call Assign Call" + }, + { + "library": "tensorflow", + "name": "_has_tf_decorator_attr", + "source_code": "def _has_tf_decorator_attr(obj):\n return hasattr(obj, '_tf_decorator') and isinstance(getattr(obj, '_tf_decorator'), TFDecorator)", + "docstring": "Checks if object has _tf_decorator attribute. This check would work for mocked object as well since it would check if returned attribute has the right type. 
Args: obj: Python object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_decorator.py", + "ast_data": "FunctionDef name:_has_tf_decorator_attr arg:obj arguments arg Return return:yes BoolOp Call Call Call" + }, + { + "library": "pytorch", + "name": "atleast_2d", + "source_code": "def atleast_2d(arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType) -> Union[TensorLikeType, tuple[TensorLikeType, ...]]:\n if not args and isinstance(arg, collections.abc.Sequence):\n args_ = arg\n else:\n assert not isinstance(arg, collections.abc.Sequence)\n args_ = (arg,) + args\n unsqueeze_atleast_1d = partial(_unsqueeze_atleast, atleast_1d, 0)\n res = tuple((a if a.ndim >= 2 else unsqueeze_atleast_1d(a) for a in args_))\n return res if len(res) > 1 else res[0]", + "docstring": "Reference implementation of :func:.", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\__init__.py", + "ast_data": "FunctionDef name:atleast_2d arg:arg arguments arg arg If BoolOp Call Assign Call Assign Assign Call Assign Call Compare Call Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "close", + "source_code": "@tf_should_use.should_use_result\ndef close(self, name=None):\n return self._implementation.close(name=name)", + "docstring": "Close the current TensorArray.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:close arg:self arg:name arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "ELU", + "source_code": "class ELU(Layer):\n\n def __init__(self, alpha=1.0, **kwargs):\n super(ELU, self).__init__(**kwargs)\n if alpha is None:\n raise ValueError('Alpha of an ELU layer cannot be None, requires a float. Got %s' % alpha)\n self.supports_masking = True\n self.alpha = backend.cast_to_floatx(alpha)\n\n def call(self, inputs):\n return backend.elu(inputs, self.alpha)\n\n def get_config(self):\n config = {'alpha': float(self.alpha)}\n base_config = super(ELU, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n return input_shape", + "docstring": "Exponential Linear Unit. It follows: Input shape: Arbitrary. Use the keyword argument (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Args: alpha: Scale for the negative factor.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\advanced_activations.py", + "ast_data": "ClassDef name:ELU FunctionDef name:__init__ arg:self arg:alpha arguments arg arg arg Call Call If Compare Raise Call Assign Assign Call FunctionDef name:call arg:self arg:inputs arguments arg arg Return return:yes Call FunctionDef name:get_config arg:self arguments arg Assign Call Assign Call Call Return return:yes Call Call Call Call Call FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "control_status_ctx", + "source_code": "@tf_export('__internal__.autograph.control_status_ctx', v1=[])\ndef control_status_ctx():\n ret = _control_ctx()[-1]\n return ret", + "docstring": "Returns the current control context for autograph. This method is useful when calling , The context will be used by tf_convert to determine whether it should convert the input function. 
See the sample usage like below: Returns: The current control context of autograph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\ag_ctx.py", + "ast_data": "FunctionDef name:control_status_ctx arguments Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_convert_expression", + "source_code": "def _convert_expression(expr) -> str:\n s = pprint_thing(expr)\n _check_expression(s)\n return s", + "docstring": "Convert an object to an expression. This function converts an object to an expression (a unicode string) and checks to make sure it isn't empty after conversion. This is used to convert operators to their string representation for recursive calls to :func:. Parameters ---------- expr : object The object to be converted to a string. Returns ------- str The string representation of an object. Raises ------ ValueError * If the expression is empty.", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\eval.py", + "ast_data": "FunctionDef name:_convert_expression arg:expr arguments arg Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_debug_identity_v2_grad", + "source_code": "@ops.RegisterGradient('DebugIdentityV2')\ndef _debug_identity_v2_grad(op, dy):\n del op\n return dy", + "docstring": "Gradient function for the DebugIdentityV2 op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py", + "ast_data": "FunctionDef name:_debug_identity_v2_grad arg:op arg:dy arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "HighlightOptions", + "source_code": "class HighlightOptions(object):\n\n def __init__(self, criterion, description=None, font_attr=DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR):\n self.criterion = criterion\n self.description = description\n self.font_attr = font_attr", + "docstring": "Options for highlighting elements of a tensor.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\tensor_format.py", + "ast_data": "ClassDef name:HighlightOptions FunctionDef name:__init__ arg:self arg:criterion arg:description arg:font_attr arguments arg arg arg arg Assign Assign Assign" + }, + { + "library": "kornia", + "name": "__init__", + "source_code": "def __init__(self, image_size: ImageSize, params: Tensor) -> None:\n if params.shape[-1] != 12 or len(params.shape) > 2:\n raise ValueError('params must be of shape (B, 12) for BROWN_CONRADY Camera')\n super().__init__(BrownConradyTransform(), Z1Projection(), image_size, params)", + "docstring": "Construct BrownConradyModel class. 
Args: image_size: Image size params: Camera parameters of shape :math: of the form :math:.", + "type": "method", + "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:image_size arg:params arguments arg arg arg If BoolOp Compare Compare Call Raise Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "blocked_autorange", + "source_code": "def blocked_autorange(self, callback: Optional[Callable[[int, float], NoReturn]]=None, min_run_time: float=0.2) -> common.Measurement:\n number = self._estimate_block_size(min_run_time)\n\n def time_hook() -> float:\n return self._timeit(number)\n\n def stop_hook(times: list[float]) -> bool:\n return True\n times = self._threaded_measurement_loop(number, time_hook, stop_hook, min_run_time=min_run_time, callback=callback)\n return common.Measurement(number_per_run=number, raw_times=times, task_spec=self._task_spec)", + "docstring": "Measure many replicates while keeping timer overhead to a minimum. At a high level, blocked_autorange executes the following pseudo-code:: total_time = 0 while total_time < min_run_time start = timer() for _ in range(block_size): total_time += (timer() - start) Note the variable in the inner loop. The choice of block size is important to measurement quality, and must balance two competing objectives: 1) A small block size results in more replicates and generally better statistics. 2) A large block size better amortizes the cost of invocation, and results in a less biased measurement. This is important because CUDA synchronization time is non-trivial (order single to low double digit microseconds) and would otherwise bias the measurement. blocked_autorange sets block_size by running a warmup period, increasing block size until timer overhead is less than 0.1% of the overall computation. This value is then used for the main measurement loop. Returns: A object that contains measured runtimes and repetition counts, and can be used to compute statistics. (mean, median, etc.)", + "type": "method", + "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\timer.py", + "ast_data": "FunctionDef name:blocked_autorange arg:self arg:callback arg:min_run_time arguments arg arg arg Assign Call FunctionDef name:time_hook arguments Return return:yes Call FunctionDef name:stop_hook arg:times arguments arg Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "as_graph_element", + "source_code": "def as_graph_element(self, obj, allow_tensor=True, allow_operation=True) -> Union[tensor_lib.Tensor, 'Operation']:\n if self._finalized:\n return self._as_graph_element_locked(obj, allow_tensor, allow_operation)\n with self._lock:\n return self._as_graph_element_locked(obj, allow_tensor, allow_operation)", + "docstring": "Returns the object referred to by , as an or . This function validates that represents an element of this graph, and gives an informative error message if it is not. This function is the canonical way to get/validate an object of one of the allowed types from an external argument reference in the Session API. This method may be called concurrently from multiple threads. Args: obj: A , an , or the name of a tensor or operation. Can also be any object with an method that returns a value of one of these types. Note: will be called inside the graph's lock and so may not modify the graph. allow_tensor: If true, may refer to a . allow_operation: If true, may refer to an . 
Returns: The or in the Graph corresponding to . Raises: TypeError: If is not a type we support attempting to convert to types. ValueError: If is of an appropriate type but invalid. For example, an invalid string. KeyError: If is not an object in the graph.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:as_graph_element arg:self arg:obj arg:allow_tensor arg:allow_operation arguments arg arg arg arg If Return return:yes Call With Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "writeTrailer", + "source_code": "def writeTrailer(self):\n self.write(b'trailer\\n')\n self.write(pdfRepr({'Size': len(self.xrefTable), 'Root': self.rootObject, 'Info': self.infoObject}))\n self.write(b'\\nstartxref\\n%d\\n%%%%EOF\\n' % self.startxref)", + "docstring": "Write out the PDF trailer.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", + "ast_data": "FunctionDef name:writeTrailer arg:self arguments arg Call Call Call Call Call" + }, + { + "library": "pandas", + "name": "_intersection_via_get_indexer", + "source_code": "@final\ndef _intersection_via_get_indexer(self, other: Index | MultiIndex, sort) -> ArrayLike | MultiIndex:\n left_unique = self.unique()\n right_unique = other.unique()\n indexer = left_unique.get_indexer_for(right_unique)\n mask = indexer != -1\n taker = indexer.take(mask.nonzero()[0])\n if sort is False:\n taker = np.sort(taker)\n result: MultiIndex | ExtensionArray | np.ndarray\n if isinstance(left_unique, ABCMultiIndex):\n result = left_unique.take(taker)\n else:\n result = left_unique.take(taker)._values\n return result", + "docstring": "Find the intersection of two Indexes using get_indexer. Returns ------- np.ndarray or ExtensionArray or MultiIndex The returned array will be unique.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_intersection_via_get_indexer arg:self arg:other arg:sort arguments arg arg arg Assign Call Assign Call Assign Call Assign Compare Assign Call Call If Compare Assign Call If Call Assign Call Assign Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "close", + "source_code": "def close():\n sess = getattr(cherrypy.serving, 'session', None)\n if getattr(sess, 'locked', False):\n sess.release_lock()\n if sess.debug:\n cherrypy.log('Lock released on close.', 'TOOLS.SESSIONS')", + "docstring": "Close the session object for this request.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:close arguments Assign Call If Call Call If Call" + }, + { + "library": "tensorflow", + "name": "zero_fraction", + "source_code": "@tf_export('math.zero_fraction', 'nn.zero_fraction')\n@dispatch.add_dispatch_support\ndef zero_fraction(value, name=None):\n with ops.name_scope(name, 'zero_fraction', [value]):\n value = ops.convert_to_tensor(value, name='value')\n size = array_ops.size(value, out_type=dtypes.int64)\n num_nonzero = tf_cond.cond(size <= dtypes.int32.max, true_fn=lambda: math_ops.cast(_count_nonzero(value, dtype=dtypes.int32), dtype=dtypes.int64), false_fn=lambda: _count_nonzero(value, dtype=dtypes.int64))\n with ops.name_scope('counts_to_fraction'):\n num_zero = size - num_nonzero\n num_zero_float32 = math_ops.cast(num_zero, dtype=dtypes.float32)\n size_float32 = math_ops.cast(size, dtype=dtypes.float32)\n zero_fraction_float32 = num_zero_float32 / size_float32\n return 
array_ops.identity(zero_fraction_float32, 'fraction')", + "docstring": "Returns the fraction of zeros in . If is empty, the result is . This is useful in summaries to measure and report sparsity. For example, Args: value: A tensor of numeric type. name: A name for the operation (optional). Returns: The fraction of zeros in , with type .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl.py", + "ast_data": "FunctionDef name:zero_fraction arg:value arg:name arguments arg arg With Call Assign Call Assign Call Assign Call Compare arguments Call Call arguments Call With Call Assign Assign Call Assign Call Assign Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_is_type", + "source_code": "def _is_type(t):\n return lambda x: isinstance(x.value, t)", + "docstring": "Factory for a type checking function of type `` or tuple of types.", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\expr.py", + "ast_data": "FunctionDef name:_is_type arg:t arguments arg Return return:yes arguments arg Call" + }, + { + "library": "tensorflow", + "name": "_EuclideanNormGrad", + "source_code": "@ops.RegisterGradient('EuclideanNorm')\ndef _EuclideanNormGrad(op: ops.Operation, grad):\n output = op.outputs[0]\n if not op.get_attr('keep_dims'):\n output_shape_kept_dims = math_ops.reduced_shape(array_ops.shape(op.inputs[0]), op.inputs[1])\n output = array_ops.reshape(output, output_shape_kept_dims)\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n return (math_ops.truediv(op.inputs[0], output / grad), None)", + "docstring": "Gradient for EuclideanNorm.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_EuclideanNormGrad arg:op arg:grad arguments arg arg Assign If Call Assign Call Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "Posterize", + "source_code": "class Posterize(OperationBase):\n\n @staticmethod\n def _process_magnitude(magnitude: Tensor) -> Tensor:\n return magnitude.long()\n\n def __init__(self, initial_magnitude: Optional[float]=4.0, initial_probability: float=0.5, magnitude_range: Tuple[float, float]=(1.0, 8.0), temperature: float=0.1, symmetric_megnitude: bool=False) -> None:\n super().__init__(K.RandomPosterize(magnitude_range, same_on_batch=False, p=initial_probability), initial_magnitude=[('bits_factor', initial_magnitude)], temperature=temperature, symmetric_megnitude=symmetric_megnitude, magnitude_fn=Posterize._process_magnitude, gradient_estimator=STEFunction)", + "docstring": "Apply posterize operation. Args: initial_magnitude: the initial magnitude. initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. magnitude_range: the sampling range for random sampling and clamping the optimized magnitude. temperature: temperature for RelaxedBernoulli distribution used during training. symmetric_megnitude: if to randomly assign the magnitude as negative or not. 
Note: STE gradient estimator applied for back propagation.", + "type": "class", + "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py", + "ast_data": "ClassDef name:Posterize FunctionDef name:_process_magnitude arg:magnitude arguments arg Return return:yes Call FunctionDef name:__init__ arg:self arg:initial_magnitude arg:initial_probability arg:magnitude_range arg:temperature arg:symmetric_megnitude arguments arg arg arg arg arg arg Call Call Call" + }, + { + "library": "scipy", + "name": "extent", + "source_code": "def extent(self, n: int, axes_seq: Literal['tf', 'ft']='tf', center_bins: bool=False) -> tuple[float, float, float, float]:\n if axes_seq not in ('tf', 'ft'):\n raise ValueError(f\"Parameter axes_seq={axes_seq!r} not in ['tf', 'ft']!\")\n if self.onesided_fft:\n q0, q1 = (0, self.f_pts)\n elif self.fft_mode == 'centered':\n q0 = -(self.mfft // 2)\n q1 = self.mfft // 2 if self.mfft % 2 == 0 else self.mfft // 2 + 1\n else:\n raise ValueError(f'Attribute fft_mode={self.fft_mode} must be ' + \"in ['centered', 'onesided', 'onesided2X']\")\n p0, p1 = (self.p_min, self.p_max(n))\n if center_bins:\n t0, t1 = (self.delta_t * (p0 - 0.5), self.delta_t * (p1 - 0.5))\n f0, f1 = (self.delta_f * (q0 - 0.5), self.delta_f * (q1 - 0.5))\n else:\n t0, t1 = (self.delta_t * p0, self.delta_t * p1)\n f0, f1 = (self.delta_f * q0, self.delta_f * q1)\n return (t0, t1, f0, f1) if axes_seq == 'tf' else (f0, f1, t0, t1)", + "docstring": "Return minimum and maximum values time-frequency values. A tuple with four floats `~ShortTimeFFT.stftmatplotlib.pyplot.imshow~ShortTimeFFT.stftmatplotlib.pyplot.imshowscipy.signal.ShortTimeFFTcenter_bins~matplotlib.pyplot.imshow~matplotlib.pyplot.imshow`.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_short_time_fft.py", + "ast_data": "FunctionDef name:extent arg:self arg:n arg:axes_seq arg:center_bins arguments arg arg arg arg If Compare Raise Call If Assign If Compare Assign Assign Compare Raise Call Assign Call If Assign Assign Assign Assign Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "get_hashable_command_line", + "source_code": "def get_hashable_command_line(build_option: BuildOptionsBase) -> str:\n return CppBuilder(name='o', sources='i', BuildOption=build_option).get_command_line()", + "docstring": "Writing the code to file will calculate a hash, which we need to vary if the command line flags change. 
This implements a mostly-generic way of validating that.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codecache.py", + "ast_data": "FunctionDef name:get_hashable_command_line arg:build_option arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "__str__", + "source_code": "def __str__(self):\n return self.diff_report()", + "docstring": "See function :func:.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\verification.py", + "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_key_counter", + "source_code": "def _get_key_counter(seed, alg):\n if alg == Algorithm.AUTO_SELECT.value:\n key, counter = gen_stateless_random_ops_v2.stateless_random_get_key_counter(seed)\n elif alg == Algorithm.PHILOX.value:\n key, counter = _philox_scramble_seed(seed)\n elif alg == Algorithm.THREEFRY.value:\n key = array_ops.reshape(_uint32s_to_uint64(math_ops.cast(seed, dtypes.uint32)), [1])\n counter = array_ops.zeros([1], dtypes.uint64)\n else:\n raise ValueError(unsupported_alg_error_msg(alg))\n return (key, counter)", + "docstring": "Calculates the key and counter to pass to raw RNG ops. This function calculates the key and counter that will be passed to the raw RNG ops like . Depending on the input , the key and counter may be scrambled or copied from . If is , the key and counter will be determined at runtime based on device type. Args: seed: An integer tensor of shape [2]. The seed to calculate the key and counter from. alg: The RNG algorithm. See for an explanation. Returns: A pair (key, counter) suitable for V2 stateless RNG ops like .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops_util.py", + "ast_data": "FunctionDef name:_get_key_counter arg:seed arg:alg arguments arg arg If Compare Assign Call If Compare Assign Call If Compare Assign Call Call Call Assign Call Raise Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "all_gather_tensor_autograd", + "source_code": "def all_gather_tensor_autograd(self: torch.Tensor, gather_dim: int, group: RANK_TYPES, tag: str=''):\n group_name = _resolve_group_name(group, tag)\n group_size = c10d._get_group_size_by_name(group_name)\n tensor = torch.ops._c10d_functional_autograd.all_gather_into_tensor(self, group_size, group_name)\n res = _FromTorchTensor.apply(tensor)\n if gather_dim != 0:\n if isinstance(res, AsyncCollectiveTensor):\n res = res.wait()\n res = torch.cat(torch.chunk(res, group_size, dim=0), dim=gather_dim)\n return res", + "docstring": "Gather tensor data across from all machines and concatenate over ``. Note that it currently only supports gather_dim = 0. This function is the same as all_gather_tensor but will propagate the backwards gradient across workers. 
See all_gather_tensor for more details on usage.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py", + "ast_data": "FunctionDef name:all_gather_tensor_autograd arg:self arg:gather_dim arg:group arg:tag arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call If Compare If Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, op: Callable, tensor: torch.Tensor, peer: Optional[int]=None, group: Optional[ProcessGroup]=None, tag: int=0, group_peer: Optional[int]=None):\n self.op = op\n self.tensor = tensor\n self.group = _group_or_default_group(group)\n self.peer = _canonicalize_group_rank(self.group, peer, group_peer, return_global=True)\n self.tag = tag\n self.group_peer = _canonicalize_group_rank(self.group, peer, group_peer)", + "docstring": "Init.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:op arg:tensor arg:peer arg:group arg:tag arg:group_peer arguments arg arg arg arg arg arg arg Assign Assign Assign Call Assign Call Assign Assign Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y):\n return self._fit(X, y)", + "docstring": "Fit the k-nearest neighbors regressor from the training dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) if metric='precomputed' Training data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs) Target values. Returns ------- self : KNeighborsRegressor The fitted k-nearest neighbors regressor.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neighbors\\_regression.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "generate_all_int_dyn_dim_possibilities", + "source_code": "def generate_all_int_dyn_dim_possibilities(my_list: list[DVar]):\n eq_possibilities = [BinConstraintD(my_list[i], Dyn, op_eq) for i in range(len(my_list))]\n neq_possibilities = [BinConstraintD(my_list[i], Dyn, op_neq) for i in range(len(my_list))]\n d_possibilities = [list(i) for i in zip(eq_possibilities, neq_possibilities)]\n all_possibilities = list(itertools.product(*d_possibilities))\n return all_possibilities", + "docstring": "Generate all possibilities of being equal or not equal to dyn for my_list Args: my_list: List of tensor dimensions Returns: A list of a list of constraints. 
Each list of constraints corresponds to one possibility about the values of the dimension variables", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", + "ast_data": "FunctionDef name:generate_all_int_dyn_dim_possibilities arg:my_list arguments arg Assign Call Call Call Assign Call Call Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "loss_labels", + "source_code": "def loss_labels(self, outputs, targets, indices, num_boxes, log=True):\n assert 'pred_logits' in outputs\n src_logits = outputs['pred_logits']\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t['labels'][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device)\n target_classes[idx] = target_classes_o\n loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)\n losses = {'loss_ce': loss_ce}\n if log:\n losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]\n return losses", + "docstring": "Classification loss (NLL) targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]", + "type": "method", + "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py", + "ast_data": "FunctionDef name:loss_labels arg:self arg:outputs arg:targets arg:indices arg:num_boxes arg:log arguments arg arg arg arg arg arg Compare Assign Assign Call Assign Call Call Assign Call Assign Assign Call Call Assign If Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_shard_tensor", + "source_code": "def _shard_tensor(self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int, src_data_rank: Optional[int]=0) -> torch.Tensor:\n my_coordinate = mesh.get_coordinate()\n num_chunks = mesh.size(mesh_dim=mesh_dim)\n if my_coordinate is None:\n return tensor.new_empty(0, requires_grad=tensor.requires_grad)\n mesh_dim_local_rank = my_coordinate[mesh_dim]\n if src_data_rank is None:\n scatter_list, _ = self._split_tensor(tensor, num_chunks, with_padding=False, contiguous=True)\n return scatter_list[mesh_dim_local_rank]\n scatter_list, pad_sizes = self._split_tensor(tensor, num_chunks, with_padding=True, contiguous=True)\n output = torch.empty_like(scatter_list[mesh_dim_local_rank])\n mesh_scatter(output, scatter_list, mesh, mesh_dim=mesh_dim, group_src=src_data_rank)\n if pad_sizes[mesh_dim_local_rank] > 0:\n output = unpad_tensor(output, self.dim, pad_sizes[mesh_dim_local_rank])\n output = output.contiguous()\n return output", + "docstring": "shard and scatter a tensor on a mesh dimension (use coordinate 0 on the mesh dimension as source of truth)", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py", + "ast_data": "FunctionDef name:_shard_tensor arg:self arg:tensor arg:mesh arg:mesh_dim arg:src_data_rank arguments arg arg arg arg arg Assign Call Assign Call If Compare Return return:yes Call Assign If Compare Assign Call Return return:yes Assign Call Assign Call Call If Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "getfile", + "source_code": "def getfile(object):\n unwrapped_object = tf_decorator.unwrap(object)[1]\n if hasattr(unwrapped_object, 'f_globals') and '__file__' in unwrapped_object.f_globals:\n return unwrapped_object.f_globals['__file__']\n return 
_inspect.getfile(unwrapped_object)", + "docstring": "TFDecorator-aware replacement for inspect.getfile.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", + "ast_data": "FunctionDef name:getfile arg:object arguments arg Assign Call If BoolOp Call Compare Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "MapperMapDataPipe", + "source_code": "@functional_datapipe('map')\nclass MapperMapDataPipe(MapDataPipe[_T_co]):\n datapipe: MapDataPipe\n fn: Callable\n\n def __init__(self, datapipe: MapDataPipe, fn: Callable=default_fn) -> None:\n super().__init__()\n self.datapipe = datapipe\n _check_unpickable_fn(fn)\n self.fn = fn\n\n def __len__(self) -> int:\n return len(self.datapipe)\n\n def __getitem__(self, index) -> _T_co:\n return self.fn(self.datapipe[index])", + "docstring": "Apply the input function over each item from the source DataPipe (functional name: ``). The function can be any regular Python function or partial object. Lambda function is not recommended as it is not supported by pickle. Args: datapipe: Source MapDataPipe fn: Function being applied to each item Example: >>> # xdoctest: +SKIP >>> from torchdata.datapipes.map import SequenceWrapper, Mapper >>> def add_one(x): ... return x + 1 >>> dp = SequenceWrapper(range(10)) >>> map_dp_1 = dp.map(add_one) >>> list(map_dp_1) [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] >>> map_dp_2 = Mapper(dp, lambda x: x + 1) >>> list(map_dp_2) [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]", + "type": "class", + "file_path": "pytorch\\torch\\utils\\data\\datapipes\\map\\callable.py", + "ast_data": "ClassDef name:MapperMapDataPipe FunctionDef name:__init__ arg:self arg:datapipe arg:fn arguments arg arg arg Call Call Assign Call Assign FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:index arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "TensorDataset", + "source_code": "class TensorDataset(Dataset[tuple[Tensor, ...]]):\n tensors: tuple[Tensor, ...]\n\n def __init__(self, *tensors: Tensor) -> None:\n assert all((tensors[0].size(0) == tensor.size(0) for tensor in tensors)), 'Size mismatch between tensors'\n self.tensors = tensors\n\n def __getitem__(self, index):\n return tuple((tensor[index] for tensor in self.tensors))\n\n def __len__(self):\n return self.tensors[0].size(0)", + "docstring": "Dataset wrapping tensors. Each sample will be retrieved by indexing tensors along the first dimension. 
Args: *tensors (Tensor): tensors that have the same size of the first dimension.", + "type": "class", + "file_path": "pytorch\\torch\\utils\\data\\dataset.py", + "ast_data": "ClassDef name:TensorDataset FunctionDef name:__init__ arg:self arguments arg arg Call Compare Call Call Assign FunctionDef name:__getitem__ arg:self arg:index arguments arg arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "reduce_window", + "source_code": "def reduce_window(operand, init, reducer, window_dimensions, window_strides=None, base_dilations=None, window_dilations=None, padding=None, name=None):\n window_strides = window_strides or [1] * len(window_dimensions)\n base_dilations = base_dilations or [1] * len(window_dimensions)\n window_dilations = window_dilations or [1] * len(window_dimensions)\n padding = padding or [(0, 0)] * len(window_dimensions)\n return gen_xla_ops.xla_reduce_window(input=operand, init_value=init, window_dimensions=window_dimensions, window_strides=window_strides, base_dilations=base_dilations, window_dilations=window_dilations, padding=padding, computation=reducer, name=name)", + "docstring": "Wraps the XLA ReduceWindow operator. ReduceWindow is documented at . Args: operand: the input tensor init: a scalar tensor representing the initial value for the reduction reducer: a reduction function that combines a pair of scalars. window_dimensions: shape of the window, as a list of integers window_strides: inter-window strides, as a list of integers. Optional; if omitted, defaults to strides of 1. padding: padding to apply to 'operand'. List of (low, high) pairs of integers that specify the padding to apply before and after each dimension. Optional; if omitted, defaults to no padding. name: the operator name, or None. Returns: A tensor that represents the output of the reduce_window operator.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py", + "ast_data": "FunctionDef name:reduce_window arg:operand arg:init arg:reducer arg:window_dimensions arg:window_strides arg:base_dilations arg:window_dilations arg:padding arg:name arguments arg arg arg arg arg arg arg arg arg Assign BoolOp Call Assign BoolOp Call Assign BoolOp Call Assign BoolOp Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "identity", + "source_code": "def identity(x, name=None):\n return array_ops.identity(x, name=name)", + "docstring": "Returns a tensor with the same content as the input tensor. Args: x: The input tensor. name: String, name for the variable to create. 
Returns: A tensor of the same shape, type and content.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:identity arg:x arg:name arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_maybe_store_many_sparse", + "source_code": "def _maybe_store_many_sparse(t, map_op_name, keep_input):\n out_tensor = utils.smart_cond(keep_input, lambda: _store_many_sparse(t, shared_name=map_op_name), lambda: -1 * array_ops.ones(array_ops.shape(t)[0:1], dtypes.int64))\n out_tensor.set_shape([None])\n return out_tensor", + "docstring": "Conditionally store multiple sparse Tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\input.py", + "ast_data": "FunctionDef name:_maybe_store_many_sparse arg:t arg:map_op_name arg:keep_input arguments arg arg arg Assign Call arguments Call arguments Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "maintainers_add_info", + "source_code": "@staticmethod\ndef maintainers_add_info(context):\n repeated = set(context['maintainers']['active']) & set(context['maintainers']['inactive'])\n if repeated:\n raise ValueError(f'Maintainers {repeated} are both active and inactive')\n maintainers_info = {}\n for user in context['maintainers']['active'] + context['maintainers']['inactive']:\n resp = requests.get(f'https://api.github.com/users/{user}', headers=GITHUB_API_HEADERS, timeout=5)\n if resp.status_code == 403:\n sys.stderr.write('WARN: GitHub API quota exceeded when fetching maintainers\\n')\n resp_bkp = requests.get(context['main']['production_url'] + 'maintainers.json', timeout=5)\n resp_bkp.raise_for_status()\n maintainers_info = resp_bkp.json()\n break\n resp.raise_for_status()\n maintainers_info[user] = resp.json()\n context['maintainers']['github_info'] = maintainers_info\n with open(pathlib.Path(context['target_path']) / 'maintainers.json', 'w', encoding='utf-8') as f:\n json.dump(maintainers_info, f)\n return context", + "docstring": "Given the active maintainers defined in the yaml file, it fetches the GitHub user information for them.", + "type": "method", + "file_path": "pandas\\web\\pandas_web.py", + "ast_data": "FunctionDef name:maintainers_add_info arg:context arguments arg Assign Call Call If Raise Call Assign For Assign Call If Compare Call Assign Call Call Assign Call Call Assign Call Assign With Call Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "order_dict", + "source_code": "def order_dict(d):\n o = list(d.items())\n\n def _key(x):\n return x[1] + (x[0],)\n return sorted(o, key=_key)", + "docstring": "Order dict by its values.", + "type": "function", + "file_path": "numpy\\numpy\\_core\\code_generators\\genapi.py", + "ast_data": "FunctionDef name:order_dict arg:d arguments arg Assign Call Call FunctionDef name:_key arg:x arguments arg Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "destroy_test_db", + "source_code": "def destroy_test_db(self, old_database_name=None, verbosity=1, keepdb=False, suffix=None):\n self.connection.close()\n if suffix is None:\n test_database_name = self.connection.settings_dict['NAME']\n else:\n test_database_name = self.get_test_db_clone_settings(suffix)['NAME']\n if verbosity >= 1:\n action = 'Destroying'\n if keepdb:\n action = 'Preserving'\n self.log('%s test database for alias %s...' 
% (action, self._get_database_display_str(verbosity, test_database_name)))\n if not keepdb:\n self._destroy_test_db(test_database_name, verbosity)\n if old_database_name is not None:\n settings.DATABASES[self.connection.alias]['NAME'] = old_database_name\n self.connection.settings_dict['NAME'] = old_database_name", + "docstring": "Destroy a test database, prompting the user for confirmation if the database already exists.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\creation.py", + "ast_data": "FunctionDef name:destroy_test_db arg:self arg:old_database_name arg:verbosity arg:keepdb arg:suffix arguments arg arg arg arg arg Call If Compare Assign Assign Call If Compare Assign If Assign Call Call If Call If Compare Assign Assign" + }, + { + "library": "tensorflow", + "name": "DeprecatedNamesAlreadySetError", + "source_code": "class DeprecatedNamesAlreadySetError(Exception):\n pass", + "docstring": "Raised when setting deprecated names multiple times for the same symbol.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py", + "ast_data": "ClassDef name:DeprecatedNamesAlreadySetError" + }, + { + "library": "tensorflow", + "name": "get_sample_protos", + "source_code": "def get_sample_protos(self):\n return self._node_name_to_sample.values()", + "docstring": "Returns list of protos for pprof profile.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py", + "ast_data": "FunctionDef name:get_sample_protos arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "aggregate_kernel_metrics", + "source_code": "def aggregate_kernel_metrics(metrics: list[str], kernel_metrics: list[dict[str, tuple[str, str]]]) -> list[list[str]]:\n if not kernel_metrics:\n raise app.UsageError('no metrics found')\n results: dict[str, tuple[list[float], str]] = {}\n for vals in kernel_metrics:\n for name in metrics:\n if name not in vals:\n raise app.UsageError(f\"metric '{name}' is not found\")\n value, unit = vals[name]\n if name not in results:\n results[name] = ([], unit)\n if results[name][1] != unit:\n raise app.UsageError(f\"unit mismatch for metric '{name}'\")\n results[name][0].append(float(value.replace(',', '')))\n kernel_metrics = []\n for name, (values, unit) in results.items():\n a = aggregate(values, name)\n if round(a) == a:\n kernel_metrics.append([name, f'{round(a)}', unit])\n else:\n kernel_metrics.append([name, f'{round(a, 2)}', unit])\n return kernel_metrics", + "docstring": "Aggregates and returns the metrics for the given kernels. Args: metrics: list of metrics names to print kernel_metrics: dictionary of metrics by kernel Returns: list of rows [name, value, unit] per metric.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\gpu\\codegen\\tools\\ncu_rep_lib.py", + "ast_data": "FunctionDef name:aggregate_kernel_metrics arg:metrics arg:kernel_metrics arguments arg arg If Raise Call For For If Compare Raise Call Assign If Compare Assign If Compare Raise Call Call Call Call Assign For Call Assign Call If Compare Call Call Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "_update_hessian", + "source_code": "def _update_hessian(self, ys, Bs, sBs, y):\n self.B = self._syr(1.0 / ys, y, a=self.B)\n self.B = self._syr(-1.0 / sBs, Bs, a=self.B)", + "docstring": "Update the Hessian matrix. BFGS update using the formula: ``. Formula (6.19) in [1]_. References ---------- .. [1] Nocedal, Jorge, and Stephen J. 
Wright. \"Numerical optimization\" Second Edition (2006).", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py", + "ast_data": "FunctionDef name:_update_hessian arg:self arg:ys arg:Bs arg:sBs arg:y arguments arg arg arg arg arg Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "pivot", + "source_code": "@property\ndef pivot(self):\n return self._pivot", + "docstring": "The boolean tensor representing the loop termination condition.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:pivot arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "float8_e5m2", + "source_code": "def float8_e5m2(self):\n _warn_typed_storage_removal()\n return self._to(torch.float8_e5m2)", + "docstring": "Casts this storage to float8_e5m2 type", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:float8_e5m2 arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "autoscale", + "source_code": "def autoscale(self, enable=True, axis='both', tight=None):\n if enable is None:\n scalex = True\n scaley = True\n scalez = True\n else:\n if axis in ['x', 'both']:\n self.set_autoscalex_on(enable)\n scalex = self.get_autoscalex_on()\n else:\n scalex = False\n if axis in ['y', 'both']:\n self.set_autoscaley_on(enable)\n scaley = self.get_autoscaley_on()\n else:\n scaley = False\n if axis in ['z', 'both']:\n self.set_autoscalez_on(enable)\n scalez = self.get_autoscalez_on()\n else:\n scalez = False\n if scalex:\n self._request_autoscale_view('x', tight=tight)\n if scaley:\n self._request_autoscale_view('y', tight=tight)\n if scalez:\n self._request_autoscale_view('z', tight=tight)", + "docstring": "Convenience method for simple axis view autoscaling. See for full documentation. 
Because this function applies to 3D Axes, *axis* can also be set to 'z', and setting *axis* to 'both' autoscales all three axes.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:autoscale arg:self arg:enable arg:axis arg:tight arguments arg arg arg arg If Compare Assign Assign Assign If Compare Call Assign Call Assign If Compare Call Assign Call Assign If Compare Call Assign Call Assign If Call If Call If Call" + }, + { + "library": "pytorch", + "name": "_sync_module_states", + "source_code": "def _sync_module_states(module: nn.Module, process_group: dist.ProcessGroup, broadcast_bucket_size: int, src: int, params_and_buffers_to_ignore: Container[str], broadcast_buffers: bool=True) -> None:\n module_states: list[torch.Tensor] = []\n for name, param in module.named_parameters():\n if name not in params_and_buffers_to_ignore:\n module_states.append(param.detach())\n if broadcast_buffers:\n for name, buffer in module.named_buffers():\n if name not in params_and_buffers_to_ignore:\n module_states.append(buffer.detach())\n _sync_params_and_buffers(process_group, module_states, broadcast_bucket_size, src)", + "docstring": "Sync ``.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\utils.py", + "ast_data": "FunctionDef name:_sync_module_states arg:module arg:process_group arg:broadcast_bucket_size arg:src arg:params_and_buffers_to_ignore arg:broadcast_buffers arguments arg arg arg arg arg arg For Call If Compare Call Call If For Call If Compare Call Call Call" + }, + { + "library": "tensorflow", + "name": "_clip_by_value_grad", + "source_code": "@ops.RegisterGradient('ClipByValue')\ndef _clip_by_value_grad(op, grad):\n x = op.inputs[0]\n y = op.inputs[1]\n z = op.inputs[2]\n gdtype = grad.dtype\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n sz = array_ops.shape(z)\n gradshape = array_ops.shape(grad)\n zeros = array_ops.zeros(gradshape, gdtype)\n xymask = math_ops.less(x, y)\n xzmask = math_ops.greater(x, z)\n _, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n _, rz = gen_array_ops.broadcast_gradient_args(sx, sz)\n xgrad = array_ops.where(math_ops.logical_or(xymask, xzmask), zeros, grad)\n ygrad = array_ops.where(xymask, grad, zeros)\n zgrad = array_ops.where(xzmask, grad, zeros)\n gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)\n gz = array_ops.reshape(math_ops.reduce_sum(zgrad, rz), sz)\n return (xgrad, gy, gz)", + "docstring": "Returns grad of clip_by_value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\clip_ops.py", + "ast_data": "FunctionDef name:_clip_by_value_grad arg:op arg:grad arguments arg arg Assign Assign Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_flatten_reduction_indices", + "source_code": "def _flatten_reduction_indices(self, multi_inds: list[sympy.Expr]) -> sympy.Expr:\n coeffs = self._get_reduction_index_coeffs()\n return sympy_dot(coeffs, multi_inds)", + "docstring": "Compute linear reduction indices from N dimensional ones.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py", + "ast_data": "FunctionDef name:_flatten_reduction_indices arg:self arg:multi_inds arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "irfft", + "source_code": 
"@array_function_dispatch(_fft_dispatcher)\ndef irfft(a, n=None, axis=-1, norm=None, out=None):\n a = asarray(a)\n if n is None:\n n = (a.shape[axis] - 1) * 2\n output = _raw_fft(a, n, axis, True, False, norm, out=out)\n return output", + "docstring": "Computes the inverse of . This function computes the inverse of the one-dimensional *n*-point discrete Fourier Transform of real input computed by . In other words, `rfftnnaxisnumpy.fftaxisaxisnnnaxisairfftnaannamnirfftifftirfft`, the negative frequencies are not specified, and the output array is purely real.", + "type": "function", + "file_path": "numpy\\numpy\\fft\\_pocketfft.py", + "ast_data": "FunctionDef name:irfft arg:a arg:n arg:axis arg:norm arg:out arguments arg arg arg arg arg Assign Call If Compare Assign Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "from_row_partitions", + "source_code": "@classmethod\ndef from_row_partitions(cls, row_partitions, dtype=None):\n if not row_partitions:\n raise ValueError('row_partitions cannot be empty')\n inner_shape = [row_partitions[-1].nvals()]\n return DynamicRaggedShape(row_partitions, inner_shape, dtype=dtype)", + "docstring": "Create a shape from row_partitions. Args: row_partitions: a nonempty list of RowPartition objects. dtype: the dtype to use, or None to use the row_partitions dtype. Returns: a DynamicRaggedShape with inner_rank==1.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:from_row_partitions arg:cls arg:row_partitions arg:dtype arguments arg arg arg If Raise Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_benchmarkFetchPrebuilt", + "source_code": "def _benchmarkFetchPrebuilt(self, name, target, size, iters):\n times = []\n with ops.Graph().as_default():\n v = variables.Variable(random_ops.random_normal([size]))\n with session.Session(target) as sess:\n sess.run(v.initializer)\n runner = sess.make_callable(v)\n runner()\n for _ in range(iters):\n start_time = time.time()\n runner()\n end_time = time.time()\n times.append(end_time - start_time)\n print('%s %d %f' % (name, size, np.median(times)))\n self.report_benchmark(iters=1, wall_time=np.median(times), name=name)", + "docstring": "Runs a microbenchmark to measure the cost of fetching a tensor. Reports the median cost of fetching a tensor of * bytes. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. size: The number of floating-point numbers to be fetched. 
iters: The number of iterations to perform.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\session_benchmark.py", + "ast_data": "FunctionDef name:_benchmarkFetchPrebuilt arg:self arg:name arg:target arg:size arg:iters arguments arg arg arg arg arg Assign With Call Call Assign Call Call With Call Call Assign Call Call For Call Assign Call Call Assign Call Call Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, boundaries, ncolors, clip=False, *, extend='neither'):\n if clip and extend != 'neither':\n raise ValueError(\"'clip=True' is not compatible with 'extend'\")\n super().__init__(vmin=boundaries[0], vmax=boundaries[-1], clip=clip)\n self.boundaries = np.asarray(boundaries)\n self.N = len(self.boundaries)\n if self.N < 2:\n raise ValueError(f'You must provide at least 2 boundaries (1 region) but you passed in {boundaries!r}')\n self.Ncmap = ncolors\n self.extend = extend\n self._scale = None\n self._n_regions = self.N - 1\n self._offset = 0\n if extend in ('min', 'both'):\n self._n_regions += 1\n self._offset = 1\n if extend in ('max', 'both'):\n self._n_regions += 1\n if self._n_regions > self.Ncmap:\n raise ValueError(f'There are {self._n_regions} color bins including extensions, but ncolors = {ncolors}; ncolors must equal or exceed the number of bins')", + "docstring": "Parameters ---------- boundaries : array-like Monotonically increasing sequence of at least 2 bin edges: data falling in the n-th bin will be mapped to the n-th color. ncolors : int Number of colors in the colormap to be used. clip : bool, optional If clip is `Colormap.__call__~matplotlib.colorbar.Colorbar` range, effectively skipping some colors in the middle of the colormap.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:boundaries arg:ncolors arg:clip arguments arg arg arg arg arg If BoolOp Compare Raise Call Call Call Assign Call Assign Call If Compare Raise Call Assign Assign Assign Assign Assign If Compare Assign If Compare If Compare Raise Call" + }, + { + "library": "matplotlib", + "name": "direction", + "source_code": "@direction.setter\ndef direction(self, direction):\n _api.check_in_list(['horizontal', 'vertical'], direction=direction)\n if hasattr(self, '_direction') and direction != self._direction:\n self._selection_artist.remove()\n if self._interactive:\n self._edge_handles.remove()\n self._direction = direction\n self.new_axes(self.ax)\n if self._interactive:\n self._setup_edge_handles(self._handle_props)\n else:\n self._direction = direction", + "docstring": "Set the direction of the span selector.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:direction arg:self arg:direction arguments arg arg Call If BoolOp Call Compare Call If Call Assign Call If Call Assign" + }, + { + "library": "django", + "name": "related_objects", + "source_code": "def related_objects(self, related_model, related_fields, objs):\n predicate = query_utils.Q.create([(f'{related_field.name}__in', objs) for related_field in related_fields], connector=query_utils.Q.OR)\n return related_model._base_manager.using(self.using).filter(predicate)", + "docstring": "Get a QuerySet of the related model to objs via related fields.", + "type": "method", + "file_path": "django\\django\\db\\models\\deletion.py", + "ast_data": "FunctionDef name:related_objects arg:self arg:related_model 
arg:related_fields arg:objs arguments arg arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "acheck_password", + "source_code": "async def acheck_password(password, encoded, setter=None, preferred='default'):\n is_correct, must_update = verify_password(password, encoded, preferred=preferred)\n if setter and is_correct and must_update:\n await setter(password)\n return is_correct", + "docstring": "See check_password().", + "type": "function", + "file_path": "django\\django\\contrib\\auth\\hashers.py", + "ast_data": "AsyncFunctionDef name:acheck_password arg:password arg:encoded arg:setter arg:preferred arguments arg arg arg arg Assign Call If BoolOp Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_OpTraceDispatchMode", + "source_code": "class _OpTraceDispatchMode(_python_dispatch.TorchDispatchMode):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.traced_ops = []\n\n def __torch_dispatch__(self, func, types, args=(), kwargs=None):\n self.traced_ops.append(func)\n return func(*args, **kwargs)", + "docstring": "Trace ops that were dispatched. Utilize the dispatch mechanism in []( to trace op overloads that were dispatched to. This is used to find the compatible op overload for a given op overload packet for different set of args and kwargs.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py", + "ast_data": "ClassDef name:_OpTraceDispatchMode FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign FunctionDef name:__torch_dispatch__ arg:self arg:func arg:types arg:args arg:kwargs arguments arg arg arg arg arg Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "desc_sig_punctuation", + "source_code": "class desc_sig_punctuation(desc_sig_element, _sig_element=True):\n classes = ['p']", + "docstring": "Node for punctuation in a signature.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:desc_sig_punctuation Assign" + }, + { + "library": "scrapy", + "name": "TunnelingAgent", + "source_code": "class TunnelingAgent(Agent):\n\n def __init__(self, *, reactor: ReactorBase, proxyConf: tuple[str, int, bytes | None], contextFactory: IPolicyForHTTPS, connectTimeout: float | None=None, bindAddress: bytes | None=None, pool: HTTPConnectionPool | None=None):\n super().__init__(reactor, contextFactory, connectTimeout, bindAddress, pool)\n self._proxyConf: tuple[str, int, bytes | None] = proxyConf\n self._contextFactory: IPolicyForHTTPS = contextFactory\n\n def _getEndpoint(self, uri: URI) -> TunnelingTCP4ClientEndpoint:\n return TunnelingTCP4ClientEndpoint(reactor=self._reactor, host=uri.host, port=uri.port, proxyConf=self._proxyConf, contextFactory=self._contextFactory, timeout=self._endpointFactory._connectTimeout, bindAddress=self._endpointFactory._bindAddress)\n\n def _requestWithEndpoint(self, key: Any, endpoint: TCP4ClientEndpoint, method: bytes, parsedURI: bytes, headers: TxHeaders | None, bodyProducer: IBodyProducer | None, requestPath: bytes) -> Deferred[TxResponse]:\n key += self._proxyConf\n return super()._requestWithEndpoint(key=key, endpoint=endpoint, method=method, parsedURI=parsedURI, headers=headers, bodyProducer=bodyProducer, requestPath=requestPath)", + "docstring": "An agent that uses a L{TunnelingTCP4ClientEndpoint} to make HTTPS downloads. 
It may look strange that we have chosen to subclass Agent and not ProxyAgent but consider that after the tunnel is opened the proxy is transparent to the client; thus the agent should behave like there is no proxy involved.", + "type": "class", + "file_path": "scrapy\\scrapy\\core\\downloader\\handlers\\http11.py", + "ast_data": "ClassDef name:TunnelingAgent FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg Call Call FunctionDef name:_getEndpoint arg:self arg:uri arguments arg arg Return return:yes Call FunctionDef name:_requestWithEndpoint arg:self arg:key arg:endpoint arg:method arg:parsedURI arg:headers arg:bodyProducer arg:requestPath arguments arg arg arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_ImageDimensions", + "source_code": "def _ImageDimensions(image, rank):\n if image.get_shape().is_fully_defined():\n return image.get_shape().as_list()\n else:\n static_shape = image.get_shape().with_rank(rank).as_list()\n dynamic_shape = array_ops_stack.unstack(array_ops.shape(image), rank)\n return [s if s is not None else d for s, d in zip(static_shape, dynamic_shape)]", + "docstring": "Returns the dimensions of an image tensor. Args: image: A rank-D Tensor. For 3-D of shape: . rank: The expected rank of the image Returns: A list of corresponding to the dimensions of the input image. Dimensions that are statically known are python integers, otherwise, they are integer scalar tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py", + "ast_data": "FunctionDef name:_ImageDimensions arg:image arg:rank arguments arg arg If Call Call Return return:yes Call Call Assign Call Call Call Assign Call Call Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "parents", + "source_code": "@property\ndef parents(self):\n return [self.key]", + "docstring": "See 'FeatureColumn` base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_hatch_linewidth", + "source_code": "def set_hatch_linewidth(self, lw):\n self._hatch_linewidth = lw", + "docstring": "Set the hatch linewidth.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:set_hatch_linewidth arg:self arg:lw arguments arg arg Assign" + }, + { + "library": "pytorch", + "name": "CustomFunctionHigherOrderOperatorVariable", + "source_code": "class CustomFunctionHigherOrderOperatorVariable(TorchHigherOrderOperatorVariable):\n\n def call_function(self, tx: 'InstructionTranslator', args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker':\n return torch._dynamo.variables.UserMethodVariable(self.value.__call__.__func__, torch._dynamo.variables.UserDefinedObjectVariable(self.value, source=self.source), source=AttrSource(AttrSource(self.source, '__call__'), '__func__')).call_function(tx, args, kwargs)", + "docstring": "Wraps torch._functorch.autograd_function.custom_function_call", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\variables\\higher_order_ops.py", + "ast_data": "ClassDef name:CustomFunctionHigherOrderOperatorVariable FunctionDef name:call_function arg:self arg:tx arg:args arg:kwargs arguments arg arg arg arg Return return:yes Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": 
"_parse_op_label", + "source_code": "def _parse_op_label(self, label: str) -> Tuple[str, str, List[str]]:\n match = re.match('(.*) = (.*)\\\\((.*)\\\\)', label)\n if match is None:\n return ('unknown', 'unknown', [])\n nn, op, inputs = match.groups()\n if not inputs:\n inputs = []\n else:\n inputs = inputs.split(', ')\n return (nn, op, inputs)", + "docstring": "Parses the fields in a node timeline label.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py", + "ast_data": "FunctionDef name:_parse_op_label arg:self arg:label arguments arg arg Assign Call If Compare Return return:yes Assign Call If Assign Assign Call Return return:yes" + }, + { + "library": "scrapy", + "name": "get_host_regex", + "source_code": "def get_host_regex(self, spider: Spider) -> re.Pattern[str]:\n allowed_domains = getattr(spider, 'allowed_domains', None)\n if not allowed_domains:\n return re.compile('')\n url_pattern = re.compile('^https?://.*$')\n port_pattern = re.compile(':\\\\d+$')\n domains = []\n for domain in allowed_domains:\n if domain is None:\n continue\n if url_pattern.match(domain):\n message = f'allowed_domains accepts only domains, not URLs. Ignoring URL entry {domain} in allowed_domains.'\n warnings.warn(message, URLWarning)\n elif port_pattern.search(domain):\n message = f'allowed_domains accepts only domains without ports. Ignoring entry {domain} in allowed_domains.'\n warnings.warn(message, PortWarning)\n else:\n domains.append(re.escape(domain))\n regex = f'^(.*\\\\.)?({'|'.join(domains)})$'\n return re.compile(regex)", + "docstring": "Override this method to implement a different offsite policy", + "type": "method", + "file_path": "scrapy\\scrapy\\spidermiddlewares\\offsite.py", + "ast_data": "FunctionDef name:get_host_regex arg:self arg:spider arguments arg arg Assign Call If Return return:yes Call Assign Call Assign Call Assign For If Compare If Call Assign Call If Call Assign Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "flatten_nodes", + "source_code": "def flatten_nodes(self):\n if not self.flattened:\n self.flattened = [None] * len(self.nodes)\n for idx, node in self.nodes.items():\n self.flattened[idx] = node\n for n in self.nodes:\n if n is None:\n raise RuntimeError('Aggregate was missing argument.')\n if self.aggregation == OpHint.AGGREGATE_FIRST:\n self.flattened = self.flattened[:1]\n elif self.aggregation == OpHint.AGGREGATE_LAST:\n self.flattened = self.flattened[-1:]\n elif self.aggregation == OpHint.AGGREGATE_STACK:\n pass\n else:\n raise ValueError('Invalid aggregation type %r specified' % self.aggregation)\n return self.flattened", + "docstring": "Return a list of all the node protos in aggregation sorted order.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py", + "ast_data": "FunctionDef name:flatten_nodes arg:self arguments arg If Assign Call For Call Assign For If Compare Raise Call If Compare Assign If Compare Assign If Compare Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "fuse_nodes", + "source_code": "def fuse_nodes(self, nodes: list[BaseSchedulerNode]) -> list[BaseSchedulerNode]:\n with dynamo_timed('Scheduler.fused_nodes', log_pt2_compile_event=True, log_waitcounter=True):\n for i in range(10):\n old_len = len(nodes)\n fusion_log.debug('===== attempting fusion (%d/10): %d nodes =====', i + 1, old_len)\n nodes = self.fuse_nodes_once(nodes)\n new_len = len(nodes)\n fusion_log.debug('completed fusion round (%d/10): 
fused %d nodes into %d nodes\\n', i + 1, old_len, new_len)\n if new_len == old_len or new_len == 1:\n fusion_log.debug('===== fusion complete (%d iterations) =====', i + 1)\n break\n return nodes", + "docstring": "Combine eligible nodes into FusedSchedulerNodes.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:fuse_nodes arg:self arg:nodes arguments arg arg With Call For Call Assign Call Call Assign Call Assign Call Call If BoolOp Compare Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_sequence_dense_tensor", + "source_code": "def get_sequence_dense_tensor(self, transformation_cache, state_manager):\n if not isinstance(self.categorical_column, SequenceCategoricalColumn):\n raise ValueError('In indicator_column: {}. categorical_column must be of type SequenceCategoricalColumn to use SequenceFeatures. Suggested fix: Use one of sequence_categorical_column_with_*. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n dense_tensor = transformation_cache.get(self, state_manager)\n sparse_tensors = self.categorical_column.get_sparse_tensors(transformation_cache, state_manager)\n sequence_length = fc_utils.sequence_length_from_sparse_tensor(sparse_tensors.id_tensor)\n return SequenceDenseColumn.TensorSequenceLengthPair(dense_tensor=dense_tensor, sequence_length=sequence_length)", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:get_sequence_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg If Call Raise Call Call Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "_flush_body", + "source_code": "def _flush_body(self):\n consume(iter(self.body))", + "docstring": "Exhaust the body iterator. :rtype: None Discard ``.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cprequest.py", + "ast_data": "FunctionDef name:_flush_body arg:self arguments arg Call Call" + }, + { + "library": "pytorch", + "name": "initialize_parameters", + "source_code": "def initialize_parameters(self: _LazyProtocol, *args, **kwargs):\n raise NotImplementedError(f'initialize_parameters is not implemented for {self.__class__.__name__}')", + "docstring": "Initialize parameters according to the input batch properties. This adds an interface to isolate parameter initialization from the forward pass when doing parameter shape inference.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\lazy.py", + "ast_data": "FunctionDef name:initialize_parameters arg:self arguments arg arg arg Raise Call" + }, + { + "library": "kornia", + "name": "sobel", + "source_code": "def sobel(input: Tensor, normalized: bool=True, eps: float=1e-06) -> Tensor:\n KORNIA_CHECK_IS_TENSOR(input)\n KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])\n edges: Tensor = spatial_gradient(input, normalized=normalized)\n gx: Tensor = edges[:, :, 0]\n gy: Tensor = edges[:, :, 1]\n magnitude: Tensor = torch.sqrt(gx * gx + gy * gy + eps)\n return magnitude", + "docstring": "Compute the Sobel operator and returns the magnitude per channel. .. image:: _static/img/sobel.png Args: input: the input image with shape :math:. normalized: if True, L1 norm of the kernel is set to 1. eps: regularization number to avoid NaN during backprop. Return: the sobel edge gradient magnitudes map with shape :math:. .. 
note:: See a working example __. Example: >>> input = torch.rand(1, 3, 4, 4) >>> output = sobel(input) # 1x3x4x4 >>> output.shape torch.Size([1, 3, 4, 4])", + "type": "function", + "file_path": "kornia\\kornia\\filters\\sobel.py", + "ast_data": "FunctionDef name:sobel arg:input arg:normalized arg:eps arguments arg arg arg Call Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "first", + "source_code": "def first(x: Series):\n arr = x.array[notna(x.array)]\n if not len(arr):\n return x.array.dtype.na_value\n return arr[0]", + "docstring": "Helper function for first item that isn't NA.", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\groupby.py", + "ast_data": "FunctionDef name:first arg:x arguments arg Assign Call If Call Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_check_chunk_size", + "source_code": "def _check_chunk_size(reduced, chunk_size):\n if reduced is None:\n return\n is_tuple = isinstance(reduced, tuple)\n if not is_tuple:\n reduced = (reduced,)\n if any((isinstance(r, tuple) or not hasattr(r, '__iter__') for r in reduced)):\n raise TypeError('reduce_func returned %r. Expected sequence(s) of length %d.' % (reduced if is_tuple else reduced[0], chunk_size))\n if any((_num_samples(r) != chunk_size for r in reduced)):\n actual_size = tuple((_num_samples(r) for r in reduced))\n raise ValueError('reduce_func returned object of length %s. Expected same length as input: %d.' % (actual_size if is_tuple else actual_size[0], chunk_size))", + "docstring": "Checks chunk is a sequence of expected size or a tuple of same.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py", + "ast_data": "FunctionDef name:_check_chunk_size arg:reduced arg:chunk_size arguments arg arg If Compare Return return:no Assign Call If Assign If Call BoolOp Call Call Raise Call If Call Compare Call Assign Call Call Raise Call" + }, + { + "library": "tensorflow", + "name": "_configure", + "source_code": "def _configure(self, session_config=None, cluster_spec=None, task_type=None, task_id=None):\n if cluster_spec:\n cluster_resolver = cluster_resolver_lib.SimpleClusterResolver(cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec), task_type=task_type, task_id=task_id, num_accelerators={'GPU': self._num_gpus_per_worker})\n self._initialize_multi_worker(cluster_resolver)\n if session_config:\n session_config.CopyFrom(self._update_config_proto(session_config))", + "docstring": "Configures the strategy class with . The strategy object will be re-initialized if is passed to but was not passed when instantiating the strategy. Args: session_config: Session config object. cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the cluster configurations. task_type: the current task type. task_id: the current task id. 
Raises: ValueError: if is given but or is not.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\parameter_server_strategy.py", + "ast_data": "FunctionDef name:_configure arg:self arg:session_config arg:cluster_spec arg:task_type arg:task_id arguments arg arg arg arg arg If Assign Call Call Call If Call Call" + }, + { + "library": "pytorch", + "name": "apply_mask", + "source_code": "def apply_mask(self, module):\n assert self._tensor_name is not None, f'Module {module} has to be pruned'\n mask = getattr(module, self._tensor_name + '_mask')\n orig = getattr(module, self._tensor_name + '_orig')\n pruned_tensor = mask.to(dtype=orig.dtype) * orig\n return pruned_tensor", + "docstring": "Simply handles the multiplication between the parameter being pruned and the generated mask. Fetches the mask and the original tensor from the module and returns the pruned version of the tensor. Args: module (nn.Module): module containing the tensor to prune Returns: pruned_tensor (torch.Tensor): pruned version of the input tensor", + "type": "method", + "file_path": "pytorch\\torch\\nn\\utils\\prune.py", + "ast_data": "FunctionDef name:apply_mask arg:self arg:module arguments arg arg Compare Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "maybe_convert_to_ragged", + "source_code": "def maybe_convert_to_ragged(is_ragged_input, output, nested_row_lengths, go_backwards=False):\n if not is_ragged_input:\n return output\n if go_backwards:\n output = reverse(output, [1])\n ragged = ragged_tensor.RaggedTensor.from_tensor(output, nested_row_lengths)\n return reverse(ragged, [1])\n else:\n return ragged_tensor.RaggedTensor.from_tensor(output, nested_row_lengths)", + "docstring": "Converts any ragged input back to its initial structure.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:maybe_convert_to_ragged arg:is_ragged_input arg:output arg:nested_row_lengths arg:go_backwards arguments arg arg arg arg If Return return:yes If Assign Call Assign Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "table", + "source_code": "def table(self, sort_by=None, row_limit=100, max_src_column_width=75, max_name_column_width=55, max_shapes_column_width=80, header=None, top_level_events_only=False):\n return _build_table(self, sort_by=sort_by, row_limit=row_limit, max_src_column_width=max_src_column_width, max_name_column_width=max_name_column_width, max_shapes_column_width=max_shapes_column_width, header=header, profile_memory=self._profile_memory, with_flops=self._with_flops, top_level_events_only=top_level_events_only)", + "docstring": "Print an EventList as a nicely formatted table. Args: sort_by (str, optional): Attribute used to sort entries. By default they are printed in the same order as they were registered. Valid keys include: `lstmadd` or other functions, nested events like low-level cpu/cuda/xpu ops events are omitted for profiler result readability. 
Returns: A string containing the table.", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\profiler_util.py", + "ast_data": "FunctionDef name:table arg:self arg:sort_by arg:row_limit arg:max_src_column_width arg:max_name_column_width arg:max_shapes_column_width arg:header arg:top_level_events_only arguments arg arg arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "inverse_transform", + "source_code": "def inverse_transform(self, X, copy=None):\n check_is_fitted(self)\n copy = copy if copy is not None else self.copy\n X = check_array(X, accept_sparse='csr', copy=copy, dtype=FLOAT_DTYPES, force_writeable=True, ensure_all_finite='allow-nan')\n if sparse.issparse(X):\n if self.with_mean:\n raise ValueError('Cannot uncenter sparse matrices: pass `with_mean=False` instead See docstring for motivation and alternatives.')\n if self.scale_ is not None:\n inplace_column_scale(X, self.scale_)\n else:\n if self.with_std:\n X *= self.scale_\n if self.with_mean:\n X += self.mean_\n return X", + "docstring": "Scale back the data to the original representation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the features axis. copy : bool, default=None Copy the input or not. Returns ------- X_original : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arg:copy arguments arg arg arg Call Assign Compare Assign Call If Call If Raise Call If Compare Call If If Return return:yes" + }, + { + "library": "numpy", + "name": "argsort", + "source_code": "def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None, *, stable=None):\n a = np.asanyarray(a)\n if axis is np._NoValue:\n axis = _deprecate_argsort_axis(a)\n if isinstance(a, MaskedArray):\n return a.argsort(axis=axis, kind=kind, order=order, endwith=endwith, fill_value=fill_value, stable=None)\n else:\n return a.argsort(axis=axis, kind=kind, order=order, stable=None)", + "docstring": "Function version of the eponymous method.", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:argsort arg:a arg:axis arg:kind arg:order arg:endwith arg:fill_value arguments arg arg arg arg arg arg arg Assign Call If Compare Assign Call If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "set_and_validate_objects", + "source_code": "def set_and_validate_objects(self, object_dict):\n for key in self.all_checkpointable_objects:\n if key in object_dict:\n if not isinstance(object_dict[key], trackable.Trackable):\n raise ValueError('Object dictionary contained a non-trackable object: {} (for key {})'.format(object_dict[key], key))\n self._object_dict[key] = object_dict[key]\n setattr(self._keras_trackable, key, object_dict[key])\n else:\n raise ValueError('Object {} missing from serialized object dict.'.format(key))\n return self.checkpointable_objects", + "docstring": "Saves objects to a dictionary, and validates the values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py", + "ast_data": "FunctionDef name:set_and_validate_objects arg:self arg:object_dict arguments arg arg For If Compare If Call Raise Call Call Assign Call Raise Call Call Return return:yes" + }, + { + "library": 
"pandas", + "name": "_values_for_json", + "source_code": "def _values_for_json(self) -> np.ndarray:\n return np.asarray(self)", + "docstring": "Specify how to render our entries in to_json. Notes ----- The dtype on the returned ndarray is not restricted, but for non-native types that are not specifically handled in objToJSON.c, to_json is liable to raise. In these cases, it may be safer to return an ndarray of strings.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:_values_for_json arg:self arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "validate", + "source_code": "def validate(self, names, defaultfmt='f%i', nbfields=None):\n if names is None:\n if nbfields is None:\n return None\n names = []\n if isinstance(names, str):\n names = [names]\n if nbfields is not None:\n nbnames = len(names)\n if nbnames < nbfields:\n names = list(names) + [''] * (nbfields - nbnames)\n elif nbnames > nbfields:\n names = names[:nbfields]\n deletechars = self.deletechars\n excludelist = self.excludelist\n case_converter = self.case_converter\n replace_space = self.replace_space\n validatednames = []\n seen = {}\n nbempty = 0\n for item in names:\n item = case_converter(item).strip()\n if replace_space:\n item = item.replace(' ', replace_space)\n item = ''.join([c for c in item if c not in deletechars])\n if item == '':\n item = defaultfmt % nbempty\n while item in names:\n nbempty += 1\n item = defaultfmt % nbempty\n nbempty += 1\n elif item in excludelist:\n item += '_'\n cnt = seen.get(item, 0)\n if cnt > 0:\n validatednames.append(item + '_%d' % cnt)\n else:\n validatednames.append(item)\n seen[item] = cnt + 1\n return tuple(validatednames)", + "docstring": "Validate a list of strings as field names for a structured array. Parameters ---------- names : sequence of str Strings to be validated. defaultfmt : str, optional Default format string, used if validating a given string reduces its length to zero. nbfields : integer, optional Final number of validated names, used to expand or shrink the initial list of names. Returns ------- validatednames : list of str The list of validated field names. Notes ----- A instance can be called directly, which is the same as calling . For examples, see .", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_iotools.py", + "ast_data": "FunctionDef name:validate arg:self arg:names arg:defaultfmt arg:nbfields arguments arg arg arg arg If Compare If Compare Return return:no Assign If Call Assign If Compare Assign Call If Compare Assign Call If Compare Assign Assign Assign Assign Assign Assign Assign Assign For Assign Call Call If Assign Call Assign Call Compare If Compare Assign While Compare Assign If Compare Assign Call If Compare Call Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "local_shards", + "source_code": "def local_shards(self) -> list[Shard]:\n return self._local_shards", + "docstring": "Returns a list of :class:`Shard' corresponding to the local shards for this rank. 
Returns an empty list if the current rank does not host any shards for this Tensor.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py", + "ast_data": "FunctionDef name:local_shards arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "BadAttributeType", + "source_code": "class BadAttributeType(ArffException):\n message = 'Bad @ATTRIBUTE type, at line %d.'", + "docstring": "Error raised when some invalid type is provided into the attribute declaration.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\externals\\_arff.py", + "ast_data": "ClassDef name:BadAttributeType Assign" + }, + { + "library": "tensorflow", + "name": "_check_type", + "source_code": "def _check_type(obj, expected_types):\n if not isinstance(obj, expected_types):\n raise TypeError('Expected type %s; got type %s' % (expected_types, type(obj)))", + "docstring": "Check if an object is of the expected type. Args: obj: The object being checked. expected_types: ( or an iterable of s) The expected (s) of obj. Raises: TypeError: If obj is not an instance of expected_type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", + "ast_data": "FunctionDef name:_check_type arg:obj arg:expected_types arguments arg arg If Call Raise Call Call" + }, + { + "library": "scipy", + "name": "kstwo_gen", + "source_code": "class kstwo_gen(rv_continuous):\n\n def _argcheck(self, n):\n return (n >= 1) & (n == np.round(n))\n\n def _shape_info(self):\n return [_ShapeInfo('n', True, (1, np.inf), (True, False))]\n\n def _get_support(self, n):\n return (0.5 / (n if not isinstance(n, Iterable) else np.asanyarray(n)), 1.0)\n\n def _pdf(self, x, n):\n return kolmognp(n, x)\n\n def _cdf(self, x, n):\n return kolmogn(n, x)\n\n def _sf(self, x, n):\n return kolmogn(n, x, cdf=False)\n\n def _ppf(self, q, n):\n return kolmogni(n, q, cdf=True)\n\n def _isf(self, q, n):\n return kolmogni(n, q, cdf=False)", + "docstring": "Kolmogorov-Smirnov two-sided test statistic distribution. 
This is the distribution of the two-sided Kolmogorov-Smirnov (KS) statistic :math: for a finite sample size `D_nFF_nkstwonF`: >>> vals = kstwo.ppf([0.001, 0.5, 0.999], n) >>> np.allclose([0.001, 0.5, 0.999], kstwo.cdf(vals, n)) True", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", + "ast_data": "ClassDef name:kstwo_gen FunctionDef name:_argcheck arg:self arg:n arguments arg arg Return return:yes Compare Compare Call FunctionDef name:_shape_info arg:self arguments arg Return return:yes Call FunctionDef name:_get_support arg:self arg:n arguments arg arg Return return:yes Call Call FunctionDef name:_pdf arg:self arg:x arg:n arguments arg arg arg Return return:yes Call FunctionDef name:_cdf arg:self arg:x arg:n arguments arg arg arg Return return:yes Call FunctionDef name:_sf arg:self arg:x arg:n arguments arg arg arg Return return:yes Call FunctionDef name:_ppf arg:self arg:q arg:n arguments arg arg arg Return return:yes Call FunctionDef name:_isf arg:self arg:q arg:n arguments arg arg arg Return return:yes Call" + }, + { + "library": "seaborn", + "name": "_during", + "source_code": "@classmethod\ndef _during(cls, step: str, var: str='') -> PlotSpecError:\n message = []\n if var:\n message.append(f'{step} failed for the `{var}` variable.')\n else:\n message.append(f'{step} failed.')\n message.append('See the traceback above for more information.')\n return cls(' '.join(message))", + "docstring": "Initialize the class to report the failure of a specific operation.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\exceptions.py", + "ast_data": "FunctionDef name:_during arg:cls arg:step arg:var arguments arg arg arg Assign If Call Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_python_version", + "source_code": "def get_python_version():\n ver = str(sys.version_info)\n mmm = re.search('.*major=([\\\\d]), minor=([\\\\d]), micro=([\\\\d]+),.*', ver)\n return mmm.group(1) + '.' + mmm.group(2) + '.' + mmm.group(3)", + "docstring": "Retrieves default Python version. Returns: String that is the version of default Python. e.g. '2.7.4'", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py", + "ast_data": "FunctionDef name:get_python_version arguments Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "__set__", + "source_code": "def __set__(self, instance, value):\n if value is not None and (not isinstance(value, self.field.remote_field.model._meta.concrete_model)):\n raise ValueError('Cannot assign \"%r\": \"%s.%s\" must be a \"%s\" instance.' % (value, instance._meta.object_name, self.field.name, self.field.remote_field.model._meta.object_name))\n elif value is not None:\n if instance._state.db is None:\n instance._state.db = router.db_for_write(instance.__class__, instance=value)\n if value._state.db is None:\n value._state.db = router.db_for_write(value.__class__, instance=instance)\n if not router.allow_relation(value, instance):\n raise ValueError('Cannot assign \"%r\": the current database router prevents this relation.' 
% value)\n remote_field = self.field.remote_field\n if value is None:\n related = self.field.get_cached_value(instance, default=None)\n if related is not None:\n remote_field.set_cached_value(related, None)\n for lh_field, rh_field in self.field.related_fields:\n setattr(instance, lh_field.attname, None)\n else:\n for lh_field, rh_field in self.field.related_fields:\n setattr(instance, lh_field.attname, getattr(value, rh_field.attname))\n self.field.set_cached_value(instance, value)\n if value is not None and (not remote_field.multiple):\n remote_field.set_cached_value(value, instance)", + "docstring": "Set the related instance through the forward relation. With the example above, when setting `` instance on the right of the equal sign", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py", + "ast_data": "FunctionDef name:__set__ arg:self arg:instance arg:value arguments arg arg arg If BoolOp Compare Call Raise Call If Compare If Compare Assign Call If Compare Assign Call If Call Raise Call Assign If Compare Assign Call If Compare Call For Call For Call Call Call If BoolOp Compare Call" + }, + { + "library": "django", + "name": "clean_old_password", + "source_code": "@sensitive_variables('old_password')\ndef clean_old_password(self):\n old_password = self.cleaned_data['old_password']\n if not self.user.check_password(old_password):\n raise ValidationError(self.error_messages['password_incorrect'], code='password_incorrect')\n return old_password", + "docstring": "Validate that the old_password field is correct.", + "type": "method", + "file_path": "django\\django\\contrib\\auth\\forms.py", + "ast_data": "FunctionDef name:clean_old_password arg:self arguments arg Assign If Call Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self):\n self._replica_id = None\n self._tt_config = tensor_tracer_report.TensorTracerConfig()\n self._parameters = tensor_tracer_flags.TTParameters()\n self._host_call_fn = {}\n self._cache_variables = {}\n self._history_value_cache = {}\n self._traced_op_names = set()\n self._report_proto = None\n self._temp_cache_var = {}\n self._report_proto_path = ''\n self._outmost_context = None", + "docstring": "Initializes a TensorTracer. Sets the various member fields from the flags (if given) or the defaults.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Assign Call Assign Call Assign Assign Assign Assign Call Assign Assign Assign Assign" + }, + { + "library": "matplotlib", + "name": "TexMetrics", + "source_code": "@dataclasses.dataclass(frozen=True, kw_only=True)\nclass TexMetrics:\n tex_width: int\n tex_height: int\n tex_depth: int", + "docstring": "Metrics of a glyph, with TeX semantics. TeX metrics have different semantics from FreeType metrics: tex_width corresponds to FreeType's `` (how much the glyph extends under the baseline, as a positive number).", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\dviread.py", + "ast_data": "ClassDef name:TexMetrics Call" + }, + { + "library": "scikit-learn", + "name": "decision_function", + "source_code": "@available_if(_search_estimator_has('decision_function'))\ndef decision_function(self, X):\n check_is_fitted(self)\n return self.best_estimator_.decision_function(X)", + "docstring": "Call decision_function on the estimator with the best found parameters. 
Only available if `X` based on the estimator with the best found parameters.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py", + "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Call Return return:yes Call Call Call" + }, + { + "library": "authlib", + "name": "get_extra_claims", + "source_code": "def get_extra_claims(self, client, grant_type, user, scope):\n return {}", + "docstring": "Return extra claims to add in the JWT access token. Developers MAY re-implement this method to add identity claims like the ones in :ref: ID Token, or any other arbitrary claims:: def get_extra_claims(self, client, grant_type, user, scope): return generate_user_info(user, scope)", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc9068\\token.py", + "ast_data": "FunctionDef name:get_extra_claims arg:self arg:client arg:grant_type arg:user arg:scope arguments arg arg arg arg arg Return return:no" + }, + { + "library": "matplotlib", + "name": "flush_events", + "source_code": "def flush_events(self):\n pass", + "docstring": "Flush the GUI events for the figure. Interactive backends need to reimplement this method.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:flush_events arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "_is_supported_batch_norm_for_training", + "source_code": "def _is_supported_batch_norm_for_training(node: Node):\n supported_ops = [torch.ops.aten.batch_norm.default, torch.ops.aten._native_batch_norm_legit.default, torch.ops.aten.cudnn_batch_norm.default, torch.ops.aten.miopen_batch_norm.default]\n return node.target in supported_ops", + "docstring": "Return True if the given node refers to an aten batch norm op QAT supports.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\utils.py", + "ast_data": "FunctionDef name:_is_supported_batch_norm_for_training arg:node arguments arg Assign Return return:yes Compare" + }, + { + "library": "matplotlib", + "name": "SpinesProxy", + "source_code": "class SpinesProxy:\n\n def __init__(self, spine_dict):\n self._spine_dict = spine_dict\n\n def __getattr__(self, name):\n broadcast_targets = [spine for spine in self._spine_dict.values() if hasattr(spine, name)]\n if name != 'set' and (not name.startswith('set_')) or not broadcast_targets:\n raise AttributeError(f\"'SpinesProxy' object has no attribute '{name}'\")\n\n def x(_targets, _funcname, *args, **kwargs):\n for spine in _targets:\n getattr(spine, _funcname)(*args, **kwargs)\n x = functools.partial(x, broadcast_targets, name)\n x.__doc__ = broadcast_targets[0].__doc__\n return x\n\n def __dir__(self):\n names = []\n for spine in self._spine_dict.values():\n names.extend((name for name in dir(spine) if name.startswith('set_')))\n return list(sorted(set(names)))", + "docstring": "A proxy to broadcast `.Spines`. The proxy cannot be used for any other operations on its members. The supported methods are determined dynamically based on the contained spines. 
If not all spines support a given method, it's executed only on the subset of spines that support it.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\spines.py", + "ast_data": "ClassDef name:SpinesProxy FunctionDef name:__init__ arg:self arg:spine_dict arguments arg arg Assign FunctionDef name:__getattr__ arg:self arg:name arguments arg arg Assign Call Call If BoolOp BoolOp Compare Call Raise Call FunctionDef name:x arg:_targets arg:_funcname arguments arg arg arg arg For Call Call Assign Call Assign Return return:yes FunctionDef name:__dir__ arg:self arguments arg Assign For Call Call Call Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "build", + "source_code": "def build(self):\n return copy.deepcopy(self._options)", + "docstring": "Build a profiling option. Returns: A dict of profiling options.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py", + "ast_data": "FunctionDef name:build arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "NotInitializedError", + "source_code": "@tf_export('__internal__.distribute.multi_process_runner.NotInitializedError', v1=[])\nclass NotInitializedError(RuntimeError):\n pass", + "docstring": "An error indicating is used without init. When this is raised, user is supposed to call within block to properly initialize .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py", + "ast_data": "ClassDef name:NotInitializedError Call" + }, + { + "library": "pytorch", + "name": "_low_contention_all_gather", + "source_code": "@torch.library.impl(lib, '_low_contention_all_gather', 'CUDA')\ndef _low_contention_all_gather(tensor: torch.Tensor, group_name: str) -> torch.Tensor:\n symm_mem = rendezvous(tensor, group_name)\n if symm_mem is not None:\n input_is_symm_mem = True\n else:\n symm_mem = get_symm_mem_workspace(group_name, tensor.numel() * tensor.element_size())\n input_is_symm_mem = False\n rank = symm_mem.rank\n world_size = symm_mem.world_size\n output = tensor.new_empty(tensor.shape[0] * world_size, *tensor.shape[1:])\n chunks = output.chunk(world_size)\n _get_backend_stream().wait_stream(torch.cuda.current_stream())\n with _get_backend_stream():\n if not input_is_symm_mem:\n local_buf = symm_mem.get_buffer(rank, tensor.shape, tensor.dtype)\n local_buf.copy_(tensor)\n symm_mem.barrier()\n for step in range(0, world_size):\n remote_rank = (rank - step) % world_size\n src_buf = symm_mem.get_buffer(remote_rank, tensor.shape, tensor.dtype)\n chunks[remote_rank].copy_(src_buf)\n symm_mem.barrier()\n torch._C._distributed_c10d._register_work(output, Work())\n return output", + "docstring": "Performs all-gather with symmetric memory in a low-contention fashion. When is already in symmetric memory: - The collective is carried out without using SMs. - No symmetric memory workspace is required. When is not in symmetric memory: - An extra SM-based copy is performed to copy the input data into the symmetric memory workspace. 
- Symmetric memory workspace size requirement: the size of .", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py", + "ast_data": "FunctionDef name:_low_contention_all_gather arg:tensor arg:group_name arguments arg arg Assign Call If Compare Assign Assign Call Call Call Assign Assign Assign Assign Call Assign Call Call Call Call With Call If Assign Call Call Call For Call Assign Assign Call Call Call Call Call Return return:yes Call" + }, + { + "library": "scrapy", + "name": "find_by_request", + "source_code": "def find_by_request(request):\n pass", + "docstring": "Return the list of spiders names that can handle the given request", + "type": "method", + "file_path": "scrapy\\scrapy\\interfaces.py", + "ast_data": "FunctionDef name:find_by_request arg:request arguments arg" + }, + { + "library": "django", + "name": "has_permission", + "source_code": "def has_permission(self, request):\n return request.user.is_active and request.user.is_staff", + "docstring": "Return True if the given HttpRequest has permission to view *at least one* page in the admin site.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\sites.py", + "ast_data": "FunctionDef name:has_permission arg:self arg:request arguments arg arg Return return:yes BoolOp" + }, + { + "library": "pytorch", + "name": "double", + "source_code": "def double(self):\n _warn_typed_storage_removal()\n return self._to(torch.double)", + "docstring": "Casts this storage to double type.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:double arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "r2r_fftpack", + "source_code": "def r2r_fftpack(forward, x, n=None, axis=-1, norm=None, overwrite_x=False):\n tmp = _asfarray(x)\n overwrite_x = overwrite_x or _datacopied(tmp, x)\n norm = _normalization(norm, forward)\n workers = _workers(None)\n if tmp.dtype.kind == 'c':\n raise TypeError('x must be a real sequence')\n if n is not None:\n tmp, copied = _fix_shape_1d(tmp, n, axis)\n overwrite_x = overwrite_x or copied\n elif tmp.shape[axis] < 1:\n raise ValueError(f'invalid number of data points ({tmp.shape[axis]}) specified')\n out = tmp if overwrite_x else None\n return pfft.r2r_fftpack(tmp, (axis,), forward, forward, norm, out, workers)", + "docstring": "FFT of a real sequence, returning fftpack half complex format", + "type": "function", + "file_path": "scipy\\scipy\\fft\\_pocketfft\\basic.py", + "ast_data": "FunctionDef name:r2r_fftpack arg:forward arg:x arg:n arg:axis arg:norm arg:overwrite_x arguments arg arg arg arg arg arg Assign Call Assign BoolOp Call Assign Call Assign Call If Compare Raise Call If Compare Assign Call Assign BoolOp If Compare Raise Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_rename_dynamic_axes", + "source_code": "def _rename_dynamic_axes(self, dynamic_shapes: dict[str, Any] | tuple[Any, ...] 
| list[Any]) -> None:\n rename_mapping = _dynamic_shapes.create_rename_mapping(self.model.graph.inputs, dynamic_shapes)\n _ir_passes.rename_axis(self.model, rename_mapping)", + "docstring": "Rename dynamic axes in a model according to the specified dynamic_axes names.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py", + "ast_data": "FunctionDef name:_rename_dynamic_axes arg:self arg:dynamic_shapes arguments arg arg Assign Call Call" + }, + { + "library": "django", + "name": "ewkt", + "source_code": "@property\ndef ewkt(self):\n srs = self.srs\n if srs and srs.srid:\n return 'SRID=%s;%s' % (srs.srid, self.wkt)\n else:\n return self.wkt", + "docstring": "Return the EWKT representation of the Geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:ewkt arg:self arguments arg Assign If BoolOp Return return:yes Return return:yes" + }, + { + "library": "kornia", + "name": "inverse", + "source_code": "def inverse(self, input: Tensor, params: Optional[List[ParamItem]]=None, extra_args: Optional[Dict[str, Any]]=None) -> Tensor:\n if params is None:\n if self._params is not None:\n params = self._params\n else:\n raise RuntimeError('No valid params to inverse the transformation.')\n return self.inverse_inputs(input, params, extra_args=extra_args)", + "docstring": "Inverse transformation. Used to inverse a tensor according to the performed transformation by a forward pass, or with respect to provided parameters.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\video.py", + "ast_data": "FunctionDef name:inverse arg:self arg:input arg:params arg:extra_args arguments arg arg arg arg If Compare If Compare Assign Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_build_orthogonal_rings", + "source_code": "def _build_orthogonal_rings(core_locations: List[_CoreLocation], ring_size: int, rotate_ring_across_rings: bool) -> List[_CoreLocation]:\n num_cores = len(core_locations)\n permutation = _build_all_reduce_ring(core_locations[:ring_size])\n for r in range(0, num_cores, ring_size):\n core_locations[r:r + ring_size] = [core_locations[r + permutation[i]] for i in range(ring_size)]\n logging.vlog(1, 'Permutated core locations: %s', core_locations)\n transposed = []\n for i in range(ring_size):\n transposed += [core_locations[g + i] for g in range(0, num_cores, ring_size)]\n num_rings = int(num_cores / ring_size)\n permutation = _build_all_reduce_ring(transposed[:num_rings], rotate=rotate_ring_across_rings)\n for r in range(0, num_cores, num_rings):\n transposed[r:r + num_rings] = [transposed[r + permutation[i]] for i in range(num_rings)]\n untransposed = []\n for i in range(num_rings):\n untransposed += [transposed[g + i] for g in range(0, num_cores, num_rings)]\n logging.vlog(1, 'Stride-permutated core locations: %s', untransposed)\n return untransposed", + "docstring": "Build two all-reduce rings orthogonal to each other. One ring includes every consecutive core locations. It is usually applied to the model-parallel dimension of a mesh to achieve best 1D all-reduce performance. The other ring includes core locations separated by a stride of . It is usually applied to the data-parallel dimension of a mesh to get predictable strided all-reduce performance. Args: core_locations: A list of core locations expressed as [x, y, z, core]. ring_size: The number of core locations in the consecutive ring. 
rotate_ring_across_rings: Build column-major secondary rings. Returns: A permutation of the input list forming the described rings.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\tpu_util.py", + "ast_data": "FunctionDef name:_build_orthogonal_rings arg:core_locations arg:ring_size arg:rotate_ring_across_rings arguments arg arg arg Assign Call Assign Call For Call Assign Call Call Assign For Call Call Assign Call Assign Call For Call Assign Call Assign For Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "read_var", + "source_code": "def read_var(self, v):\n raise NotImplementedError('must be implemented in descendants')", + "docstring": "Reads the value of a variable. Returns the aggregate value of a replica-local variable, or the (read-only) value of any other variable. Args: v: A variable allocated within the scope of this . Returns: A tensor representing the value of , aggregated across replicas if necessary.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:read_var arg:self arg:v arguments arg arg Raise Call" + }, + { + "library": "matplotlib", + "name": "_get_data_scale", + "source_code": "def _get_data_scale(X, Y, Z):\n if not np.ma.count(X):\n return 0\n ptp_x = X.max() - X.min()\n ptp_y = Y.max() - Y.min()\n ptp_z = Z.max() - Z.min()\n return np.sqrt(ptp_x ** 2 + ptp_y ** 2 + ptp_z ** 2)", + "docstring": "Estimate the scale of the 3D data for use in depth shading Parameters ---------- X, Y, Z : masked arrays The data to estimate the scale of.", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:_get_data_scale arg:X arg:Y arg:Z arguments arg arg arg If Call Return return:yes Assign Call Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_convert_complex_to_real_representation", + "source_code": "def _convert_complex_to_real_representation(model_args):\n return tuple((torch.view_as_real(arg.resolve_conj()) if isinstance(arg, torch.Tensor) and arg.is_complex() else arg for arg in model_args))", + "docstring": "Convert complex dtype tensors to real representation tensors. ONNX does not support complex dtype tensors. 
Thus, we convert complex dtype tensors to real representation tensors (i.e., float dtype tensors with an extra dimension representing the real and imaginary parts of the complex number).", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py", + "ast_data": "FunctionDef name:_convert_complex_to_real_representation arg:model_args arguments arg Return return:yes Call BoolOp Call Call Call Call" + }, + { + "library": "scipy", + "name": "sort_indices", + "source_code": "def sort_indices(self):\n if not self.has_sorted_indices:\n M = len(self.indptr) - 1\n csr_sort_indices(M, self.indptr, self.indices, self.data)\n self.has_sorted_indices = True", + "docstring": "Sort the indices of this array/matrix *in place*", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_compressed.py", + "ast_data": "FunctionDef name:sort_indices arg:self arguments arg If Assign Call Call Assign" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None):\n X, y = validate_data(self, X, y, accept_sparse=('csr', 'csc'), multi_output=True, y_numeric=True)\n if sample_weight is not None and (not isinstance(sample_weight, float)):\n sample_weight = _check_sample_weight(sample_weight, X)\n K = self._get_kernel(X)\n alpha = np.atleast_1d(self.alpha)\n ravel = False\n if len(y.shape) == 1:\n y = y.reshape(-1, 1)\n ravel = True\n copy = self.kernel == 'precomputed'\n self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha, sample_weight, copy)\n if ravel:\n self.dual_coef_ = self.dual_coef_.ravel()\n self.X_fit_ = X\n return self", + "docstring": "Fit Kernel Ridge regression model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. If kernel == \"precomputed\" this is instead a precomputed kernel matrix, of shape (n_samples, n_samples). y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. sample_weight : float or array-like of shape (n_samples,), default=None Individual weights for each sample, ignored if None is passed. Returns ------- self : object Returns the instance itself.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\kernel_ridge.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call If BoolOp Compare Call Assign Call Assign Call Assign Call Assign If Compare Call Assign Call Assign Assign Compare Assign Call If Assign Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_annotate_conv_transpose_bn", + "source_code": "@register_annotator('conv_transpose_bn')\ndef _annotate_conv_transpose_bn(gm: torch.fx.GraphModule, quantization_config: Optional[QuantizationConfig], filter_fn: Optional[Callable[[Node], bool]]=None) -> Optional[list[list[Node]]]:\n return _do_annotate_conv_bn(gm, quantization_config, filter_fn, has_relu=False, is_conv_transpose=True)", + "docstring": "Find conv_transpose + batchnorm parititions Note: This is only used for QAT. 
In PTQ, batchnorm should already be fused into the conv.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xnnpack_quantizer_utils.py", + "ast_data": "FunctionDef name:_annotate_conv_transpose_bn arg:gm arg:quantization_config arg:filter_fn arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_slot_initializers", + "source_code": "@abc.abstractmethod\ndef _slot_initializers(self) -> List[init_ops_v2.Initializer]:\n raise NotImplementedError", + "docstring": "Returns initializers for slot variables. This returns a parallel list to self._slot_names().", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py", + "ast_data": "FunctionDef name:_slot_initializers arg:self arguments arg Raise" + }, + { + "library": "tensorflow", + "name": "output", + "source_code": "@property\ndef output(self) -> Optional[trace.TraceType]:\n return self.return_annotation if self.return_annotation is not self.empty else None", + "docstring": "Return the output TraceType if specified.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py", + "ast_data": "FunctionDef name:output arg:self arguments arg Return return:yes Compare" + }, + { + "library": "numpy", + "name": "_CopyMode", + "source_code": "@_set_module('numpy')\nclass _CopyMode(enum.Enum):\n ALWAYS = True\n NEVER = False\n IF_NEEDED = 2\n\n def __bool__(self):\n if self == _CopyMode.ALWAYS:\n return True\n if self == _CopyMode.NEVER:\n return False\n raise ValueError(f'{self} is neither True nor False.')", + "docstring": "An enumeration for the copy modes supported by numpy.copy() and numpy.array(). The following three modes are supported, - ALWAYS: This means that a deep copy of the input array will always be taken. - IF_NEEDED: This means that a deep copy of the input array will be taken only if necessary. - NEVER: This means that the deep copy will never be taken. If a copy cannot be avoided then a will be raised. Note that the buffer-protocol could in theory do copies. NumPy currently assumes an object exporting the buffer protocol will never do this.", + "type": "class", + "file_path": "numpy\\numpy\\_globals.py", + "ast_data": "ClassDef name:_CopyMode Assign Assign Assign FunctionDef name:__bool__ arg:self arguments arg If Compare Return return:yes If Compare Return return:yes Raise Call Call" + }, + { + "library": "pandas", + "name": "_unconvert_string_array", + "source_code": "def _unconvert_string_array(data: np.ndarray, nan_rep, encoding: str, errors: str) -> np.ndarray:\n shape = data.shape\n data = np.asarray(data.ravel(), dtype=object)\n if len(data):\n itemsize = libwriters.max_len_string_array(ensure_object(data))\n dtype = f'U{itemsize}'\n if isinstance(data[0], bytes):\n ser = Series(data, copy=False).str.decode(encoding, errors=errors, dtype='object')\n data = ser.to_numpy()\n data.flags.writeable = True\n else:\n data = data.astype(dtype, copy=False).astype(object, copy=False)\n if nan_rep is None:\n nan_rep = 'nan'\n libwriters.string_array_replace_from_nan_rep(data, nan_rep)\n return data.reshape(shape)", + "docstring": "Inverse of _convert_string_array. Parameters ---------- data : np.ndarray[fixed-length-string] nan_rep : the storage repr of NaN encoding : str errors : str Handler for encoding errors. 
Returns ------- np.ndarray[object] Decoded data.", + "type": "function", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:_unconvert_string_array arg:data arg:nan_rep arg:encoding arg:errors arguments arg arg arg arg Assign Assign Call Call If Call Assign Call Call Assign If Call Assign Call Call Assign Call Assign Assign Call Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "load_graph", + "source_code": "def load_graph(self, returns, meta_graph_def):\n saver, _ = tf_saver._import_meta_graph_with_return_elements(meta_graph_def)\n returns[0] = saver", + "docstring": "Called from wrap_function to import .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load_v1_in_v2.py", + "ast_data": "FunctionDef name:load_graph arg:self arg:returns arg:meta_graph_def arguments arg arg arg Assign Call Assign" + }, + { + "library": "django", + "name": "conditional_expression_supported_in_where_clause", + "source_code": "def conditional_expression_supported_in_where_clause(self, expression):\n return True", + "docstring": "Return True, if the conditional expression is supported in the WHERE clause.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:conditional_expression_supported_in_where_clause arg:self arg:expression arguments arg arg Return return:yes" + }, + { + "library": "pandas", + "name": "_convert_to_color", + "source_code": "@classmethod\ndef _convert_to_color(cls, color_spec):\n from openpyxl.styles import Color\n if isinstance(color_spec, str):\n return Color(color_spec)\n else:\n return Color(**color_spec)", + "docstring": "Convert `` to an openpyxl v2 Color object. Parameters ---------- color_spec : str, dict A 32-bit ARGB hex string, or a dict with zero or more of the following keys. 
'rgb' 'indexed' 'auto' 'theme' 'tint' 'index' 'type' Returns ------- color : openpyxl.styles.Color", + "type": "method", + "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py", + "ast_data": "FunctionDef name:_convert_to_color arg:cls arg:color_spec arguments arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_barrier_nonblocking", + "source_code": "def _barrier_nonblocking(store, world_size: int, key_prefix: str) -> str:\n num_members_key = key_prefix + _NUM_MEMBERS\n last_member_key = key_prefix + _LAST_MEMBER_CHECKIN\n idx = store.add(num_members_key, 1)\n if idx == world_size:\n store.set(last_member_key, '')\n return last_member_key", + "docstring": "Does all the non-blocking operations for a barrier and returns the final key that can be waited on.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\elastic\\utils\\store.py", + "ast_data": "FunctionDef name:_barrier_nonblocking arg:store arg:world_size arg:key_prefix arguments arg arg arg Assign Assign Assign Call If Compare Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_iteritems", + "source_code": "def _iteritems(d):\n return d.iteritems() if hasattr(d, 'iteritems') else d.items()", + "docstring": "Like d.iteritems, but accepts any collections.Mapping.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\_hash.py", + "ast_data": "FunctionDef name:_iteritems arg:d arguments arg Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "pnt_func", + "source_code": "def pnt_func(f):\n return double_output(f, [c_void_p, c_int])", + "docstring": "For accessing point information.", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\geom.py", + "ast_data": "FunctionDef name:pnt_func arg:f arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_FSDPModState", + "source_code": "class _FSDPModState(_State):\n BEF_PRE_FW = 'Before Pre-Forward'\n AFT_PRE_FW = 'After Pre-Forward'\n BEF_POST_FW = 'Before Post-Forward'\n AFT_POST_FW = 'After Post-Forward'\n BEF_PRE_BW = 'Before Pre-Backward'\n AFT_PRE_BW = 'After Pre-Backward'\n BEF_POST_BW = 'Before Post-Backward'\n AFT_POST_BW = 'After Post-Backward'\n PRE_FW_AC = 'Pre-Forward AC'\n POST_FW_AC = 'Post-Forward AC'\n PEAK_FW = 'Peak Forward'\n PEAK_BW = 'Peak Backward'", + "docstring": "Enumerates the states of FSDP modules during the forward and backward passes.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\_tools\\fsdp2_mem_tracker.py", + "ast_data": "ClassDef name:_FSDPModState Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "_assert_identical_pytree_spec", + "source_code": "def _assert_identical_pytree_spec(spec1: pytree.TreeSpec, spec2: pytree.TreeSpec, error_message: str) -> None:\n pass_if_any_checks: Sequence[Callable[[], bool]] = [lambda: spec1 == spec2, lambda: _replace_list_with_tuple(spec1) == _replace_list_with_tuple(spec2), lambda: _open_top_level_sequence_if_single_element(spec1) == spec2, lambda: spec1 == _open_top_level_sequence_if_single_element(spec2)]\n if not any((check() for check in pass_if_any_checks)):\n raise ValueError(f'{error_message}\\nExpect {spec1}.\\nActual {spec2}.')", + "docstring": "Assert the two objects are identical. Args: spec1: The first object. spec2: The second object. error_message: The error message to raise if the two objects are not identical. 
Raises: ValueError: If the two objects are not identical.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", + "ast_data": "FunctionDef name:_assert_identical_pytree_spec arg:spec1 arg:spec2 arg:error_message arguments arg arg arg arguments Compare arguments Compare Call Call arguments Compare Call arguments Compare Call If Call Call Raise Call" + }, + { + "library": "numpy", + "name": "rec_append_fields", + "source_code": "@array_function_dispatch(_rec_append_fields_dispatcher)\ndef rec_append_fields(base, names, data, dtypes=None):\n return append_fields(base, names, data=data, dtypes=dtypes, asrecarray=True, usemask=False)", + "docstring": "Add new fields to an existing array. The names of the fields are given with the arguments, the corresponding values with the arguments. If a single field is appended, , and do not have to be lists but just values. Parameters ---------- base : array Input array to extend. names : string, sequence String or sequence of strings corresponding to the names of the new fields. data : array or sequence of arrays Array or sequence of arrays storing the fields to add to the base. dtypes : sequence of datatypes, optional Datatype or sequence of datatypes. If None, the datatypes are estimated from the . See Also -------- append_fields Returns ------- appended_array : np.recarray", + "type": "function", + "file_path": "numpy\\numpy\\lib\\recfunctions.py", + "ast_data": "FunctionDef name:rec_append_fields arg:base arg:names arg:data arg:dtypes arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "ReferenceVariableSaveable", + "source_code": "class ReferenceVariableSaveable(saveable_object.SaveableObject):\n\n def __init__(self, var, slice_spec, name):\n spec = saveable_object.SaveSpec(var, slice_spec, name, dtype=var.dtype)\n super(ReferenceVariableSaveable, self).__init__(var, [spec], name)\n\n def restore(self, restored_tensors, restored_shapes):\n restored_tensor = restored_tensors[0]\n if restored_shapes is not None:\n restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])\n return state_ops.assign(self.op, restored_tensor, validate_shape=restored_shapes is None and self.op.get_shape().is_fully_defined())", + "docstring": "SaveableObject implementation that handles reference variables.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", + "ast_data": "ClassDef name:ReferenceVariableSaveable FunctionDef name:__init__ arg:self arg:var arg:slice_spec arg:name arguments arg arg arg arg Assign Call Call Call FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Assign If Compare Assign Call Return return:yes Call BoolOp Compare Call Call" + }, + { + "library": "pytorch", + "name": "speedup_experiment_fx2trt", + "source_code": "def speedup_experiment_fx2trt(args, model_iter_fn, model, example_inputs):\n return speedup_experiment(args, model_iter_fn, model, example_inputs)", + "docstring": "Measure speedups over eager using the trt inference backend. TRT backend is based fx graph generated by torch._dynamo. 
Writes to ./speedups_fx2trt.csv", + "type": "function", + "file_path": "pytorch\\benchmarks\\dynamo\\common.py", + "ast_data": "FunctionDef name:speedup_experiment_fx2trt arg:args arg:model_iter_fn arg:model arg:example_inputs arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_and_reset", + "source_code": "def get_and_reset(self) -> tuple[list[UsageData], list[str], list[str]]:\n copy_data = []\n copy_errors = []\n copy_logs = []\n with self._lock:\n copy_data = copy.deepcopy(self._data_list)\n copy_errors = copy.deepcopy(self._data_errors)\n copy_logs = copy.deepcopy(self._data_logs)\n self._data_list.clear()\n self._data_errors.clear()\n self._data_logs.clear()\n return (copy_data, copy_errors, copy_logs)", + "docstring": "get deepcopy of list of usageData and list of string errors", + "type": "method", + "file_path": "pytorch\\tools\\stats\\monitor.py", + "ast_data": "FunctionDef name:get_and_reset arg:self arguments arg Assign Assign Assign With Assign Call Assign Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "strip_et_fields", + "source_code": "def strip_et_fields(es: object) -> None:\n for entry in es:\n for field in ET_FIELDS:\n entry.pop(field, None)", + "docstring": "Given a loaded yaml representing a list of operators, remove ET specific fields from every entries for BC compatibility", + "type": "function", + "file_path": "pytorch\\torchgen\\executorch\\parse.py", + "ast_data": "FunctionDef name:strip_et_fields arg:es arguments arg For For Call" + }, + { + "library": "tensorflow", + "name": "_global_batch_size", + "source_code": "@property\ndef _global_batch_size(self):\n return True", + "docstring": "and use global batch size. assumes per-replica batching. Returns: Boolean.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py", + "ast_data": "FunctionDef name:_global_batch_size arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "ParameterAlias", + "source_code": "class ParameterAlias:\n\n def __init__(self, alias_to):\n self.alias_to = alias_to\n\n def __repr__(self):\n return f'ParameterAlias[alias_to: {self.alias_to}]'", + "docstring": "Indicates that a parameter should alias the value of another parameter. When used in conjunction with a custom distribution, this allows fuzzed tensors to represent a broader range of behaviors. For example, the following sometimes produces Tensors which broadcast: Fuzzer( parameters=[ FuzzedParameter(\"x_len\", 4, 1024, distribution=\"uniform\"), # will either be size one, or match the size of . FuzzedParameter(\"y_len\", distribution={ 0.5: 1, 0.5: ParameterAlias(\"x_len\") }), ], tensors=[ FuzzedTensor(\"x\", size=(\"x_len\",)), FuzzedTensor(\"y\", size=(\"y_len\",)), ], ) Chains of alias' are allowed, but may not contain cycles.", + "type": "class", + "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\fuzzer.py", + "ast_data": "ClassDef name:ParameterAlias FunctionDef name:__init__ arg:self arg:alias_to arguments arg arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "translate_x", + "source_code": "def translate_x(min_mag: float, max_mag: float) -> OperationBase:\n if min_mag != -max_mag:\n raise ValueError(f'{TranslateX.__name__} is a symmetric operation that `- min_mag == max_mag`. 
Got [{min_mag}, {max_mag}]')\n return TranslateX(None, 1.0, magnitude_range=(0.0, max_mag), symmetric_megnitude=True)", + "docstring": "Return TranslateX op.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py", + "ast_data": "FunctionDef name:translate_x arg:min_mag arg:max_mag arguments arg arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "reset", + "source_code": "def reset():\n return torch._C._lazy._clear_ir_cache()", + "docstring": "Clear TrieCache. This is needed in testing to avoid node reusing between different tests.", + "type": "function", + "file_path": "pytorch\\torch\\_lazy\\ir_cache.py", + "ast_data": "FunctionDef name:reset arguments Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_apply_sparse", + "source_code": "def _apply_sparse(self, grad, var):\n raise NotImplementedError()", + "docstring": "Add ops to apply sparse gradients to . The IndexedSlices object passed to in this function is by default pre-processed in to remove duplicate indices (see its docstring for details). Optimizers which can tolerate or have correct special cases for duplicate sparse indices may override instead of this function, avoiding that overhead. Args: grad: , with no repeated indices. var: A object. Returns: An .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_apply_sparse arg:self arg:grad arg:var arguments arg arg arg Raise Call" + }, + { + "library": "scipy", + "name": "rvs", + "source_code": "def rvs(self, row, col, *, size=None, method=None, random_state=None):\n r, c, n = self._process_parameters(row, col)\n size, shape = self._process_size_shape(size, r, c)\n random_state = self._get_random_state(random_state)\n meth = self._process_rvs_method(method, r, c, n)\n return meth(r, c, n, size, random_state).reshape(shape)", + "docstring": "Draw random tables with fixed column and row marginals. Parameters ---------- %(_doc_row_col)s size : integer, optional Number of samples to draw (default 1). method : str, optional Which method to use, \"boyett\" or \"patefield\". If None (default), selects the fastest method for this input. %(_doc_random_state)s Returns ------- rvs : ndarray Random 2D tables of shape (, , ). Notes ----- %(_doc_row_col_note)s Examples -------- >>> from scipy.stats import random_table >>> row = [1, 5] >>> col = [2, 3, 1] >>> random_table.rvs(row, col, random_state=123) array([[1., 0., 0.], [1., 3., 1.]]) Alternatively, the object may be called (as a function) to fix the row and column vector sums, returning a \"frozen\" distribution. 
>>> d = random_table(row, col) >>> d.rvs(random_state=123) array([[1., 0., 0.], [1., 3., 1.]])", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:rvs arg:self arg:row arg:col arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_TextToBytesWrapper", + "source_code": "class _TextToBytesWrapper(io.BufferedReader):\n\n def __init__(self, text_io_buffer, encoding=None, errors=None, **kwargs):\n super().__init__(text_io_buffer, **kwargs)\n self.encoding = encoding or text_io_buffer.encoding or 'utf-8'\n self.errors = errors or text_io_buffer.errors or 'strict'\n\n def __del__(self):\n self.detach()\n\n def _encoding_call(self, method_name, *args, **kwargs):\n raw_method = getattr(self.raw, method_name)\n val = raw_method(*args, **kwargs)\n return val.encode(self.encoding, errors=self.errors)\n\n def read(self, size=-1):\n return self._encoding_call('read', size)\n\n def read1(self, size=-1):\n return self._encoding_call('read1', size)\n\n def peek(self, size=-1):\n return self._encoding_call('peek', size)\n\n def seek(self, offset, whence=0):\n if offset == 0 and whence == 0 or (offset == 0 and whence == 2):\n super().seek(offset, whence)\n else:\n pass", + "docstring": "Convert a TextIOBase string stream to a byte stream.", + "type": "class", + "file_path": "scipy\\scipy\\io\\_fast_matrix_market\\__init__.py", + "ast_data": "ClassDef name:_TextToBytesWrapper FunctionDef name:__init__ arg:self arg:text_io_buffer arg:encoding arg:errors arguments arg arg arg arg arg Call Call Assign BoolOp Assign BoolOp FunctionDef name:__del__ arg:self arguments arg Call FunctionDef name:_encoding_call arg:self arg:method_name arguments arg arg arg arg Assign Call Assign Call Return return:yes Call FunctionDef name:read arg:self arg:size arguments arg arg Return return:yes Call FunctionDef name:read1 arg:self arg:size arguments arg arg Return return:yes Call FunctionDef name:peek arg:self arg:size arguments arg arg Return return:yes Call FunctionDef name:seek arg:self arg:offset arg:whence arguments arg arg arg If BoolOp BoolOp Compare Compare BoolOp Compare Compare Call Call" + }, + { + "library": "tensorflow", + "name": "_address", + "source_code": "@property\ndef _address(self) -> str:\n return 'localhost:{0}'.format(self._server.bound_port())", + "docstring": "Returns the address of the server. The returned string will be in the form address:port, e.g. \"localhost:1000\".", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py", + "ast_data": "FunctionDef name:_address arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, pad=0.3):\n self.pad = pad", + "docstring": "Parameters ---------- pad : float, default: 0.3 The amount of padding around the original box.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:pad arguments arg arg Assign" + }, + { + "library": "matplotlib", + "name": "_get_anchored_bbox", + "source_code": "def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):\n return offsetbox._get_anchored_bbox(loc, bbox, parentbbox, self.borderaxespad * renderer.points_to_pixels(self._fontsize))", + "docstring": "Place the *bbox* inside the *parentbbox* according to a given location code. 
Return the (x, y) coordinate of the bbox. Parameters ---------- loc : int A location code in range(1, 11). This corresponds to the possible values for `~matplotlib.transforms.Bbox~matplotlib.transforms.Bbox` A parent box which will contain the bbox, in display coordinates.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\legend.py", + "ast_data": "FunctionDef name:_get_anchored_bbox arg:self arg:loc arg:bbox arg:parentbbox arg:renderer arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_SymbolicException", + "source_code": "class _SymbolicException(Exception):\n pass", + "docstring": "Exception class to handle use of symbolic tensors when executing eagerly. creates symbolic tensors (in a FuncGraph managed by the Keras backend) while in eager execution. This exception is used to identify this case (raised in cause generated functions for ops to construct graphs instead of executing the kernel).", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\eager\\core.py", + "ast_data": "ClassDef name:_SymbolicException" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self) -> Tensor:\n rot = self.scale * angle_to_rotation_matrix(self.rot)\n out = convert_affinematrix_to_homography(torch.cat([rot, self.shift], dim=2))\n return out", + "docstring": "Single-batch similarity transform\". Returns: Similarity with shape :math:", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py", + "ast_data": "FunctionDef name:forward arg:self arguments arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_from_shape", + "source_code": "@classmethod\ndef _from_shape(cls, shape: dynamic_ragged_shape.DynamicRaggedShape) -> 'StructuredTensor.Spec':\n return StructuredTensor.Spec(_ragged_shape=shape, _fields={})", + "docstring": "Creates the spec of an empty StructuredTensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py", + "ast_data": "FunctionDef name:_from_shape arg:cls arg:shape arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "get_expire_at_browser_close", + "source_code": "def get_expire_at_browser_close(self):\n if (expiry := self.get('_session_expiry')) is None:\n return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE\n return expiry == 0", + "docstring": "Return `` to find the actual expiry date/age, if there is one.", + "type": "method", + "file_path": "django\\django\\contrib\\sessions\\backends\\base.py", + "ast_data": "FunctionDef name:get_expire_at_browser_close arg:self arguments arg If Compare Call Return return:yes Return return:yes Compare" + }, + { + "library": "scipy", + "name": "_fit_loc_scale_support", + "source_code": "def _fit_loc_scale_support(self, data, *args):\n if isinstance(data, CensoredData):\n data = data._uncensor()\n else:\n data = np.asarray(data)\n loc_hat, scale_hat = self.fit_loc_scale(data, *args)\n self._argcheck(*args)\n _a, _b = self._get_support(*args)\n a, b = (_a, _b)\n support_width = b - a\n if support_width <= 0:\n return (loc_hat, scale_hat)\n a_hat = loc_hat + a * scale_hat\n b_hat = loc_hat + b * scale_hat\n data_a = np.min(data)\n data_b = np.max(data)\n if a_hat < data_a and data_b < b_hat:\n return (loc_hat, scale_hat)\n data_width = data_b - data_a\n rel_margin = 0.1\n margin = data_width * rel_margin\n if support_width < np.inf:\n loc_hat = data_a - a - margin\n 
scale_hat = (data_width + 2 * margin) / support_width\n return (loc_hat, scale_hat)\n if a > -np.inf:\n return (data_a - a - margin, 1)\n elif b < np.inf:\n return (data_b - b + margin, 1)\n else:\n raise RuntimeError", + "docstring": "Estimate loc and scale parameters from data accounting for support. Parameters ---------- data : array_like Data to fit. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). Returns ------- Lhat : float Estimated location parameter for the data. Shat : float Estimated scale parameter for the data.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:_fit_loc_scale_support arg:self arg:data arguments arg arg arg If Call Assign Call Assign Call Assign Call Call Assign Call Assign Assign If Compare Return return:yes Assign Assign Assign Call Assign Call If BoolOp Compare Compare Return return:yes Assign Assign Assign If Compare Assign Assign Return return:yes If Compare Return return:yes If Compare Return return:yes Raise" + }, + { + "library": "sphinx", + "name": "_sha1", + "source_code": "def _sha1(data: bytes=b'', **_kw: Any) -> hashlib._Hash:\n import hashlib\n return hashlib.sha1(data, usedforsecurity=False)", + "docstring": "Deprecated wrapper around hashlib.sha1 To be removed in Sphinx 9.0", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\__init__.py", + "ast_data": "FunctionDef name:_sha1 arg:data arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, features):\n self._features = features.copy()\n self._feature_tensors = {}", + "docstring": "Creates a . Args: features: A mapping from feature column to objects that are or , or can be converted to same via . A key signifies a base feature (not-transformed). A key means that this is the output of an existing which can be reused.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:features arguments arg arg Assign Call Assign" + }, + { + "library": "scrapy", + "name": "install_shutdown_handlers", + "source_code": "def install_shutdown_handlers(function: SignalHandlerT, override_sigint: bool=True) -> None:\n signal.signal(signal.SIGTERM, function)\n if signal.getsignal(signal.SIGINT) == signal.default_int_handler or override_sigint:\n signal.signal(signal.SIGINT, function)\n if hasattr(signal, 'SIGBREAK'):\n signal.signal(signal.SIGBREAK, function)", + "docstring": "Install the given function as a signal handler for all common shutdown signals (such as SIGINT, SIGTERM, etc). If `` the SIGINT handler won't be installed if there is already a handler in place (e.g. 
Pdb)", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\ossignal.py", + "ast_data": "FunctionDef name:install_shutdown_handlers arg:function arg:override_sigint arguments arg arg Call If BoolOp Compare Call Call If Call Call" + }, + { + "library": "tensorflow", + "name": "master", + "source_code": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n if self._tpu != 'local':\n cluster_spec = self.cluster_spec()\n if task_type is not None and task_id is not None:\n master = cluster_spec.task_address(task_type, task_id)\n elif self.task_type is not None and self.task_id is not None:\n master = cluster_spec.task_address(self.task_type, self.task_id)\n else:\n job_tasks = cluster_spec.job_tasks(self.task_type)\n if not job_tasks:\n raise ValueError('No TPUs with the specified names exist.')\n master = job_tasks[0]\n return cluster_resolver_lib.format_master_url(master, 'grpc')\n else:\n return ''", + "docstring": "Get the Master string to be used for the session. In the normal case, this returns the grpc path (grpc://1.2.3.4:8470) of first instance in the ClusterSpec returned by the cluster_spec function. If a non-TPU name is used when constructing a TPUClusterResolver, that will be returned instead (e.g. If the tpus argument's value when constructing this TPUClusterResolver was 'grpc://10.240.1.2:8470', 'grpc://10.240.1.2:8470' will be returned). Args: task_type: (Optional, string) The type of the TensorFlow task of the master. task_id: (Optional, integer) The index of the TensorFlow task of the master. rpc_layer: (Optional, string) The RPC protocol TensorFlow should use to communicate with TPUs. Returns: string, the connection string to use when creating a session. Raises: ValueError: If none of the TPUs specified exists.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py", + "ast_data": "FunctionDef name:master arg:self arg:task_type arg:task_id arg:rpc_layer arguments arg arg arg arg If Compare Assign Call If BoolOp Compare Compare Assign Call If BoolOp Compare Compare Assign Call Assign Call If Raise Call Assign Return return:yes Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_get_estimator", + "source_code": "def _get_estimator(self):\n if self.estimator is None and self.base_estimator != 'deprecated':\n estimator_ = clone(self.base_estimator)\n warn('`base_estimator` has been deprecated in 1.6 and will be removed in 1.8. Please use `estimator` instead.', FutureWarning)\n elif self.estimator is None and self.base_estimator == 'deprecated':\n raise ValueError('You must pass an estimator to SelfTrainingClassifier. Use `estimator`.')\n elif self.estimator is not None and self.base_estimator != 'deprecated':\n raise ValueError('You must pass only one estimator to SelfTrainingClassifier. Use `estimator`.')\n else:\n estimator_ = clone(self.estimator)\n return estimator_", + "docstring": "Get the estimator. 
Returns ------- estimator_ : estimator object The cloned estimator object.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\semi_supervised\\_self_training.py", + "ast_data": "FunctionDef name:_get_estimator arg:self arguments arg If BoolOp Compare Compare Assign Call Call If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call Assign Call Return return:yes" + }, + { + "library": "authlib", + "name": "raise_error_response", + "source_code": "def raise_error_response(self, error):\n status = error.status_code\n body = json.dumps(dict(error.get_body()))\n headers = error.get_headers()\n raise_http_exception(status, body, headers)", + "docstring": "Raise HTTPException for OAuth2Error. Developers can re-implement this method to customize the error response. :param error: OAuth2Error :raise: HTTPException", + "type": "method", + "file_path": "authlib\\authlib\\integrations\\flask_oauth2\\resource_protector.py", + "ast_data": "FunctionDef name:raise_error_response arg:self arg:error arguments arg arg Assign Assign Call Call Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "named_buffers", + "source_code": "def named_buffers(self, prefix: str='', recurse: bool=True, remove_duplicate: bool=True) -> Iterator[tuple[str, Tensor]]:\n gen = self._named_members(lambda module: module._buffers.items(), prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate)\n yield from gen", + "docstring": "Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself. Args: prefix (str): prefix to prepend to all buffer names. recurse (bool, optional): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module. Defaults to True. remove_duplicate (bool, optional): whether to remove the duplicated buffers in the result. Defaults to True. Yields: (str, torch.Tensor): Tuple containing the name and buffer Example:: >>> # xdoctest: +SKIP(\"undefined vars\") >>> for name, buf in self.named_buffers(): >>> if name in ['running_var']: >>> print(buf.size())", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:named_buffers arg:self arg:prefix arg:recurse arg:remove_duplicate arguments arg arg arg arg Assign Call arguments arg Call" + }, + { + "library": "tensorflow", + "name": "keras_style_scope", + "source_code": "@tf_contextlib.contextmanager\ndef keras_style_scope():\n global _KERAS_STYLE_SCOPE\n stack = _KERAS_STYLE_SCOPE\n _KERAS_STYLE_SCOPE = True\n try:\n yield\n finally:\n _KERAS_STYLE_SCOPE = stack", + "docstring": "Use Keras-style variable management. All tf.layers and tf RNN cells created in this scope use Keras-style variable management. Creating such layers with a scope= argument is disallowed, and reuse=True is disallowed. The purpose of this scope is to allow users of existing layers to slowly transition to a Keras layers API without breaking existing functionality. One example of this is when using TensorFlow's RNN classes with Keras Models or Networks. Because Keras models do not properly set variable scopes, users of RNNs may either accidentally share scopes between two different models, or get errors about variables that already exist. 
Example: The solution is to wrap the model construction and execution in a keras-style scope: Yields: A keras layer style scope.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\base.py", + "ast_data": "FunctionDef name:keras_style_scope arguments Assign Assign Try Assign" + }, + { + "library": "pytorch", + "name": "_intern_module", + "source_code": "def _intern_module(self, module_name: str, dependencies: bool):\n module_obj = self._import_module(module_name)\n module_name = demangle(module_name)\n is_package = hasattr(module_obj, '__path__')\n source = self._get_source_of_module(module_obj)\n if source is None:\n filename = getattr(module_obj, '__file__', None)\n error_context = None\n if filename is None:\n packaging_error = PackagingErrorReason.NO_DUNDER_FILE\n elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)):\n packaging_error = PackagingErrorReason.IS_EXTENSION_MODULE\n else:\n packaging_error = PackagingErrorReason.SOURCE_FILE_NOT_FOUND\n error_context = f'filename: {filename}'\n self.dependency_graph.add_node(module_name, action=_ModuleProviderAction.INTERN, is_package=is_package, error=packaging_error, error_context=error_context, provided=True)\n return\n self.dependency_graph.add_node(module_name, action=_ModuleProviderAction.INTERN, is_package=is_package, source=source, provided=True)\n if dependencies:\n deps = self._get_dependencies(source, module_name, is_package)\n for dep in deps:\n self.dependency_graph.add_edge(module_name, dep)\n self.add_dependency(dep)", + "docstring": "Adds the module to the dependency graph as an interned module, along with any metadata needed to write it out to the zipfile at serialization time.", + "type": "method", + "file_path": "pytorch\\torch\\package\\package_exporter.py", + "ast_data": "FunctionDef name:_intern_module arg:self arg:module_name arg:dependencies arguments arg arg arg Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Assign If Compare Assign If Call Call Assign Assign Assign Call Return return:no Call If Assign Call For Call Call" + }, + { + "library": "kornia", + "name": "channels_order", + "source_code": "@property\ndef channels_order(self) -> ChannelsOrder:\n return self.layout.channels_order", + "docstring": "Return the channels order.", + "type": "method", + "file_path": "kornia\\kornia\\image\\image.py", + "ast_data": "FunctionDef name:channels_order arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "score", + "source_code": "def score(self, X, y=None, **params):\n _check_refit(self, 'score')\n check_is_fitted(self)\n _raise_for_params(params, self, 'score')\n if _routing_enabled():\n score_params = process_routing(self, 'score', **params).scorer['score']\n else:\n score_params = dict()\n if self.scorer_ is None:\n raise ValueError(\"No score function explicitly defined, and the estimator doesn't provide one %s\" % self.best_estimator_)\n if isinstance(self.scorer_, dict):\n if self.multimetric_:\n scorer = self.scorer_[self.refit]\n else:\n scorer = self.scorer_\n return scorer(self.best_estimator_, X, y, **score_params)\n score = self.scorer_(self.best_estimator_, X, y, **score_params)\n if self.multimetric_:\n score = score[self.refit]\n return score", + "docstring": "Return the score on the given data, if the estimator has been refit. 
This uses the score defined by `n_samplesn_featuresenable_metadata_routing=TrueMetadata Routing User Guide ` method otherwise.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py", + "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg arg Call Call Call If Call Assign Call Assign Call If Compare Raise Call If Call If Assign Assign Return return:yes Call Assign Call If Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "tick_values", + "source_code": "def tick_values(self, vmin, vmax):\n raise NotImplementedError('Derived must override')", + "docstring": "Return the values of the located ticks given **vmin** and **vmax**. .. note:: To get tick locations with the vmin and vmax values defined automatically for the associated `` simply call the Locator instance:: >>> print(type(loc)) >>> print(loc()) [1, 2, 3, 4]", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:tick_values arg:self arg:vmin arg:vmax arguments arg arg arg Raise Call" + }, + { + "library": "pandas", + "name": "_get_time_micros", + "source_code": "def _get_time_micros(self) -> npt.NDArray[np.int64]:\n values = self._data._local_timestamps()\n ppd = periods_per_day(self._data._creso)\n frac = values % ppd\n if self.unit == 'ns':\n micros = frac // 1000\n elif self.unit == 'us':\n micros = frac\n elif self.unit == 'ms':\n micros = frac * 1000\n elif self.unit == 's':\n micros = frac * 1000000\n else:\n raise NotImplementedError(self.unit)\n micros[self._isnan] = -1\n return micros", + "docstring": "Return the number of microseconds since midnight. Returns ------- ndarray[int64_t]", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\datetimes.py", + "ast_data": "FunctionDef name:_get_time_micros arg:self arguments arg Assign Call Assign Call Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Raise Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "paste_osx_pyobjc", + "source_code": "def paste_osx_pyobjc():\n board = AppKit.NSPasteboard.generalPasteboard()\n content = board.stringForType_(AppKit.NSStringPboardType)\n return content", + "docstring": "Returns contents of clipboard", + "type": "function", + "file_path": "pandas\\pandas\\io\\clipboard\\__init__.py", + "ast_data": "FunctionDef name:paste_osx_pyobjc arguments Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "initializer", + "source_code": "@property\ndef initializer(self):\n return self.initialize()", + "docstring": "Returns a list of ops that initialize the iterator.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py", + "ast_data": "FunctionDef name:initializer arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "map_structure_with_atomic", + "source_code": "def map_structure_with_atomic(is_atomic_fn, map_fn, nested):\n if is_atomic_fn(nested):\n return map_fn(nested)\n if not nest.is_nested(nested):\n raise ValueError('Received non-atomic and non-sequence element: {}'.format(nested))\n if nest.is_mapping(nested):\n values = [nested[k] for k in sorted(nested.keys())]\n elif nest.is_attrs(nested):\n values = _astuple(nested)\n else:\n values = nested\n mapped_values = [map_structure_with_atomic(is_atomic_fn, map_fn, ele) for ele in values]\n return nest._sequence_like(nested, mapped_values)", + "docstring": "Maps the atomic elements 
of a nested structure. Args: is_atomic_fn: A function that determines if an element of is atomic. map_fn: The function to apply to atomic elements of . nested: A nested structure. Returns: The nested structure, with atomic elements mapped according to . Raises: ValueError: If an element that is neither atomic nor a sequence is encountered.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py", + "ast_data": "FunctionDef name:map_structure_with_atomic arg:is_atomic_fn arg:map_fn arg:nested arguments arg arg arg If Call Return return:yes Call If Call Raise Call Call If Call Assign Call Call If Call Assign Call Assign Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_UnpackGrad", + "source_code": "@ops.RegisterGradient('Unpack')\ndef _UnpackGrad(op: ops.Operation, *grads):\n return array_ops_stack.stack(grads, axis=op.get_attr('axis'))", + "docstring": "Gradient for unpack op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py", + "ast_data": "FunctionDef name:_UnpackGrad arg:op arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "SuspiciousOperation", + "source_code": "class SuspiciousOperation(Exception):\n pass", + "docstring": "The user did something suspicious", + "type": "class", + "file_path": "django\\django\\core\\exceptions.py", + "ast_data": "ClassDef name:SuspiciousOperation" + }, + { + "library": "django", + "name": "delete_many", + "source_code": "def delete_many(self, keys, version=None):\n for key in keys:\n self.delete(key, version=version)", + "docstring": "Delete a bunch of values in the cache at once. For certain backends (memcached), this is much more efficient than calling delete() multiple times.", + "type": "method", + "file_path": "django\\django\\core\\cache\\backends\\base.py", + "ast_data": "FunctionDef name:delete_many arg:self arg:keys arg:version arguments arg arg arg For Call" + }, + { + "library": "pytorch", + "name": "next", + "source_code": "def next(self, filename: str, lineno: int, instruction_pointer, inst) -> SpeculationEntry:\n if len(self.entries) == self.index:\n self.entries.append(SpeculationEntry(filename, lineno, instruction_pointer, inst))\n entry = self.entries[self.index]\n prev_entry_msg = ''\n if self.index != 0:\n prev_entry = self.entries[self.index - 1]\n prev_entry_msg = f'Previous instruction: {prev_entry.filename}:{prev_entry.lineno}({prev_entry.inst.opname} @ {prev_entry.instruction_pointer})\\n'\n if not (entry.instruction_pointer == instruction_pointer and entry.filename == filename and (entry.lineno == lineno)):\n raise SpeculationLogDivergence(f'\\nSpeculationLog diverged at index {self.index} (log had {len(self.entries)} entries):\\n- Expected: {entry.filename}:{entry.lineno} ({entry.inst.opname} at ip={entry.instruction_pointer})\\n- Actual: {filename}:{lineno} ({inst.opname} at ip={instruction_pointer})\\n{prev_entry_msg}\\nThere are two usual reasons why this may have occured:\\n- When Dynamo analysis restarted, the second run took a different path than\\n the first. 
If this occurred, the previous instruction is the critical instruction that\\n behaved differently.\\n- Speculation entries are only added under certain conditions (as seen in\\n step()), e.g., there must exist operators in the graph; those conditions may\\n have changed on restart.\\n\\nIf this divergence was intentional, clear the speculation log before restarting (do NOT\\ndo this for graph breaks, you will infinite loop).\\n\\nOtherwise, please submit a bug report, ideally including the contents of TORCH_LOGS=+dynamo\\n')\n self.index += 1\n return entry", + "docstring": "Lookup or create a SpeculationEntry() that is shared across RestartAnalysis calls. Args are used only for debug checks.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py", + "ast_data": "FunctionDef name:next arg:self arg:filename arg:lineno arg:instruction_pointer arg:inst arguments arg arg arg arg arg If Compare Call Call Call Assign Assign If Compare Assign Assign If BoolOp Compare Compare Compare Raise Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "_get_op", + "source_code": "def _get_op(self, data_key: DataKey) -> Type[SequentialOpsInterface[Any]]:\n if data_key == DataKey.INPUT:\n return InputSequentialOps\n if data_key == DataKey.MASK:\n return MaskSequentialOps\n if data_key in {DataKey.BBOX, DataKey.BBOX_XYWH, DataKey.BBOX_XYXY}:\n return BoxSequentialOps\n if data_key == DataKey.KEYPOINTS:\n return KeypointSequentialOps\n if data_key == DataKey.CLASS:\n return ClassSequentialOps\n raise RuntimeError(f'Operation for `{data_key.name}` is not found.')", + "docstring": "Return the corresponding operation given a data key.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\ops.py", + "ast_data": "FunctionDef name:_get_op arg:self arg:data_key arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "get_class_if_classified_error", + "source_code": "def get_class_if_classified_error(e: Exception) -> Optional[str]:\n from torch._dynamo.exc import TorchRuntimeError, Unsupported, UserError\n ALWAYS_CLASSIFIED = 'always_classified'\n DEFAULT_CLASS_SIGIL = 'case_name'\n _ALLOW_LIST = {Unsupported: DEFAULT_CLASS_SIGIL, UserError: DEFAULT_CLASS_SIGIL, TorchRuntimeError: None}\n if type(e) in _ALLOW_LIST:\n attr_name = _ALLOW_LIST[type(e)]\n if attr_name is None:\n return ALWAYS_CLASSIFIED\n return getattr(e, attr_name, None)\n return None", + "docstring": "Returns a string case name if the export error e is classified. Returns None otherwise.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\db\\logging.py", + "ast_data": "FunctionDef name:get_class_if_classified_error arg:e arguments arg Assign Assign Assign If Compare Call Assign Call If Compare Return return:yes Return return:yes Call Return return:no" + }, + { + "library": "scikit-learn", + "name": "NotFittedError", + "source_code": "class NotFittedError(ValueError, AttributeError):\n pass", + "docstring": "Exception class to raise if estimator is used before fitting. This class inherits from both ValueError and AttributeError to help with exception handling and backward compatibility. Examples -------- >>> from sklearn.svm import LinearSVC >>> from sklearn.exceptions import NotFittedError >>> try: ... LinearSVC().predict([[1, 2], [2, 3], [3, 4]]) ... except NotFittedError as e: ... 
print(repr(e)) NotFittedError(\"This LinearSVC instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.\"...) .. versionchanged:: 0.18 Moved from sklearn.utils.validation.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\exceptions.py", + "ast_data": "ClassDef name:NotFittedError" + }, + { + "library": "tensorflow", + "name": "multiplier", + "source_code": "@property\ndef multiplier(self):\n return self._multiplier", + "docstring": "The [batch] scalar , in .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_identity.py", + "ast_data": "FunctionDef name:multiplier arg:self arguments arg Return return:yes" + }, + { + "library": "pygame", + "name": "empty", + "source_code": "def empty(self):\n for sprite in self.sprites():\n self.remove_internal(sprite)\n sprite.remove_internal(self)", + "docstring": "remove all sprites Group.empty(): return None Removes all the sprites from the group.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:empty arg:self arguments arg For Call Call Call" + }, + { + "library": "django", + "name": "RightLookup", + "source_code": "@BaseSpatialField.register_lookup\nclass RightLookup(GISLookup):\n lookup_name = 'right'", + "docstring": "The 'right' operator returns true if A's bounding box is strictly to the right of B's bounding box.", + "type": "class", + "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py", + "ast_data": "ClassDef name:RightLookup Assign" + }, + { + "library": "scipy", + "name": "isminor", + "source_code": "def isminor(x, ref):\n sensitivity = 0.1\n refa = abs(ref) + sensitivity * abs(x)\n refb = abs(ref) + 2 * sensitivity * abs(x)\n return np.logical_or(abs(ref) >= refa, refa >= refb)", + "docstring": "This function tests whether x is minor compared to ref. It is used by Powell, e.g., in COBYLA. In precise arithmetic, isminor(x, ref) is true if and only if x == 0; in floating point arithmetic, isminor(x, ref) is true if x is 0 or its nonzero value can be attributed to computer rounding errors according to ref. Larger sensitivity means the function is more strict/precise, the value 0.1 being due to Powell. For example: isminor(1e-20, 1e300) -> True, because in floating point arithmetic 1e-20 cannot be added to 1e300 without being rounded to 1e300. isminor(1e300, 1e-20) -> False, because in floating point arithmetic adding 1e300 to 1e-20 dominates the latter number. 
isminor(3, 4) -> False, because 3 can be added to 4 without being rounded off", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\linalg.py", + "ast_data": "FunctionDef name:isminor arg:x arg:ref arguments arg arg Assign Assign Call Call Assign Call Call Return return:yes Call Compare Call Compare" + }, + { + "library": "matplotlib", + "name": "add_figure", + "source_code": "def add_figure(self, figure):\n if figure not in self.views:\n self.views[figure] = cbook._Stack()\n self.positions[figure] = cbook._Stack()\n self.home_views[figure] = WeakKeyDictionary()\n self.push_current(figure)\n figure.add_axobserver(lambda fig: self.update_home_views(fig))", + "docstring": "Add the current figure to the stack of views and positions.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "FunctionDef name:add_figure arg:self arg:figure arguments arg arg If Compare Assign Call Assign Call Assign Call Call Call arguments arg Call" + }, + { + "library": "tensorflow", + "name": "clear_kernel_cache", + "source_code": "def clear_kernel_cache(self):\n if self._context_handle is not None:\n pywrap_tfe.TFE_ContextClearCaches(self._context_handle)", + "docstring": "Clear kernel cache and reset all stateful kernels.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:clear_kernel_cache arg:self arguments arg If Compare Call" + }, + { + "library": "pandas", + "name": "to_arrays", + "source_code": "def to_arrays(data, columns: Index | None, dtype: DtypeObj | None=None) -> tuple[list[ArrayLike], Index]:\n if not len(data):\n if isinstance(data, np.ndarray):\n if data.dtype.names is not None:\n columns = ensure_index(data.dtype.names)\n arrays = [data[name] for name in columns]\n if len(data) == 0:\n for i, arr in enumerate(arrays):\n if arr.ndim == 2:\n arrays[i] = arr[:, 0]\n return (arrays, columns)\n return ([], ensure_index([]))\n elif isinstance(data, np.ndarray) and data.dtype.names is not None:\n if columns is None:\n columns = Index(data.dtype.names)\n arrays = [data[k] for k in columns]\n return (arrays, columns)\n if isinstance(data[0], (list, tuple)):\n arr = _list_to_arrays(data)\n elif isinstance(data[0], abc.Mapping):\n arr, columns = _list_of_dict_to_arrays(data, columns)\n elif isinstance(data[0], ABCSeries):\n arr, columns = _list_of_series_to_arrays(data, columns)\n else:\n data = [tuple(x) for x in data]\n arr = _list_to_arrays(data)\n content, columns = _finalize_columns_and_data(arr, columns, dtype)\n return (content, columns)", + "docstring": "Return list of arrays, columns. Returns ------- list[ArrayLike] These will become columns in a DataFrame. Index This will become frame.columns. 
Notes ----- Ensures that len(result_arrays) == len(result_index).", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\construction.py", + "ast_data": "FunctionDef name:to_arrays arg:data arg:columns arg:dtype arguments arg arg arg If Call If Call If Compare Assign Call Assign If Compare Call For Call If Compare Assign Return return:yes Return return:yes Call If BoolOp Call Compare If Compare Assign Call Assign Return return:yes If Call Assign Call If Call Assign Call If Call Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "op_priority", + "source_code": "def op_priority(op_type):\n if op_type in ('Const', 'Shape', 'BroadcastGradientArgs', 'Range', 'VariableShape', 'Fill', 'OneHot', 'ShapeN'):\n return 7\n if op_type in ('Identity', 'Cast', 'Reshape', 'ExpandDims', 'StopGradient', 'PreventGradient', 'Squeeze', 'Gather', 'GatherNd'):\n return 6\n if op_type in ('ConcatV2', 'Concat', 'StridedSlice', 'Slice', 'Pack', 'Tile', 'CollectivePermute', 'SplitV', 'DynamicPartition'):\n return 5\n if op_type in ('Pad', 'RandomUniformInt', 'GreaterEqual'):\n return 4\n if op_type in ('Sum', 'AddV2', 'Add', 'AddN', 'BiasAdd', 'CrossReplicaSum'):\n return 3\n if op_type in ('Neg', 'Sub'):\n return 2\n if op_type in ('Mul', 'Square', 'MatMul', 'RandomUniform', 'Select', 'Maximum', 'Mean', 'Variance', 'Exp', 'Rsqrt'):\n return 1\n return 2", + "docstring": "Returns the priority of the op. If the priority of the op is k, it will be traced if trace_level>=k. Args: op_type: String name of the operation type. Returns: Integer value corresponding the priority of the op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:op_priority arg:op_type arguments arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "scipy", + "name": "Problem12", + "source_code": "class Problem12(Benchmark):\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n self._bounds = [(0, 2 * pi)]\n self.global_optimum = pi\n self.fglob = -1\n\n def fun(self, x, *args):\n self.nfev += 1\n x = x[0]\n return sin(x) ** 3.0 + cos(x) ** 3.0", + "docstring": "Univariate Problem12 objective function. This class defines the Univariate Problem12 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem12}}(x) = \\sin^3(x) + \\cos^3(x) Bound constraints: :math: .. 
figure:: figures/Problem12.png :alt: Univariate Problem12 function :align: center **Univariate Problem12 function** *Global optimum*: :math: for :math:", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py", + "ast_data": "ClassDef name:Problem12 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "ResourceExhaustedError", + "source_code": "@tf_export('errors.ResourceExhaustedError')\nclass ResourceExhaustedError(OpError):\n\n def __init__(self, node_def, op, message, *args):\n super(ResourceExhaustedError, self).__init__(node_def, op, message, RESOURCE_EXHAUSTED, *args)", + "docstring": "Raised when some resource has been exhausted while running operation. For example, this error might be raised if a per-user quota is exhausted, or perhaps the entire file system is out of space. If running into due to out of memory (OOM), try to use smaller batch size or reduce dimension size of model weights.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", + "ast_data": "ClassDef name:ResourceExhaustedError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "_solvevec", + "source_code": "def _solvevec(self, rhs, adjoint=False):\n rhs_mat = array_ops.expand_dims(rhs, axis=-1)\n solution_mat = self.solve(rhs_mat, adjoint=adjoint)\n return array_ops.squeeze(solution_mat, axis=-1)", + "docstring": "Default implementation of _solvevec.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:_solvevec arg:self arg:rhs arg:adjoint arguments arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_is_scalar_helper", + "source_code": "def _is_scalar_helper(self, static_shape, dynamic_shape_fn):\n if static_shape.ndims is not None:\n return static_shape.ndims == 0\n shape = dynamic_shape_fn()\n if shape.get_shape().ndims is not None and shape.get_shape().dims[0].value is not None:\n return shape.get_shape().as_list() == [0]\n return math_ops.equal(array_ops.shape(shape)[0], 0)", + "docstring": "Implementation for and .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py", + "ast_data": "FunctionDef name:_is_scalar_helper arg:self arg:static_shape arg:dynamic_shape_fn arguments arg arg arg If Compare Return return:yes Compare Assign Call If BoolOp Compare Call Compare Call Return return:yes Compare Call Call Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "validate_token_endpoint_auth_signing_alg_values_supported", + "source_code": "def validate_token_endpoint_auth_signing_alg_values_supported(self):\n _validate_alg_values(self, 'token_endpoint_auth_signing_alg_values_supported', self.token_endpoint_auth_methods_supported)", + "docstring": "OPTIONAL. JSON array containing a list of the JWS signing algorithms (\"alg\" values) supported by the token endpoint for the signature on the JWT [JWT] used to authenticate the client at the token endpoint for the \"private_key_jwt\" and \"client_secret_jwt\" authentication methods. 
This metadata entry MUST be present if either of these authentication methods are specified in the \"token_endpoint_auth_methods_supported\" entry. No default algorithms are implied if this entry is omitted. Servers SHOULD support \"RS256\". The value \"none\" MUST NOT be used.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py", + "ast_data": "FunctionDef name:validate_token_endpoint_auth_signing_alg_values_supported arg:self arguments arg Call" + }, + { + "library": "pytorch", + "name": "forward", + "source_code": "def forward(self, x: torch.Tensor) -> torch.Tensor:\n weight_quant_dequant = self.get_weight()\n result = F.conv1d(x, weight_quant_dequant, self.bias, self.stride, self.padding, self.dilation, self.groups)\n return result", + "docstring": "we have: w(float) -- quant - dequant x(float) ------------- F.conv1d --- In the full model, we will see w(float) -- quant - *dequant x -- quant --- *dequant -- *F.conv1d --- *quant - dequant and the backend should be able to fuse the ops with into a quantized conv1d", + "type": "method", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\conv.py", + "ast_data": "FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, sync_optimizer, is_chief, num_tokens):\n self._sync_optimizer = sync_optimizer\n self._is_chief = is_chief\n self._num_tokens = num_tokens", + "docstring": "Creates hook to handle SyncReplicasOptimizer initialization ops. Args: sync_optimizer: which this hook will initialize. is_chief: , whether is this a chief replica or not. num_tokens: Number of tokens to add to the queue.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\sync_replicas_optimizer.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:sync_optimizer arg:is_chief arg:num_tokens arguments arg arg arg arg Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "int", + "source_code": "def int(self):\n _warn_typed_storage_removal()\n return self._to(torch.int)", + "docstring": "Casts this storage to int type.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:int arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_unregister_erase_node_hook", + "source_code": "def _unregister_erase_node_hook(self, f):\n assert callable(f), 'erase_node hook must be a callable.'\n self._erase_node_hooks.remove(f)", + "docstring": "Takes a callable which was previously registered to be called after we erase a node. This function will unregister that callable so it is no longer invoked on node erasure.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\graph_module.py", + "ast_data": "FunctionDef name:_unregister_erase_node_hook arg:self arg:f arguments arg arg Call Call" + }, + { + "library": "scrapy", + "name": "get_spec", + "source_code": "def get_spec(func: Callable[..., Any]) -> tuple[list[str], dict[str, Any]]:\n if inspect.isfunction(func) or inspect.ismethod(func):\n spec = inspect.getfullargspec(func)\n elif hasattr(func, '__call__'):\n spec = inspect.getfullargspec(func.__call__)\n else:\n raise TypeError(f'{type(func)} is not callable')\n defaults: tuple[Any, ...] 
= spec.defaults or ()\n firstdefault = len(spec.args) - len(defaults)\n args = spec.args[:firstdefault]\n kwargs = dict(zip(spec.args[firstdefault:], defaults))\n return (args, kwargs)", + "docstring": "Returns (args, kwargs) tuple for a function >>> import re >>> get_spec(re.match) (['pattern', 'string'], {'flags': 0}) >>> class Test: ... def __call__(self, val): ... pass ... def method(self, val, flags=0): ... pass >>> get_spec(Test) (['self', 'val'], {}) >>> get_spec(Test.method) (['self', 'val'], {'flags': 0}) >>> get_spec(Test().method) (['self', 'val'], {'flags': 0})", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\python.py", + "ast_data": "FunctionDef name:get_spec arg:func arguments arg If BoolOp Call Call Assign Call If Call Assign Call Raise Call Call BoolOp Assign Call Call Assign Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "AggregateProfile", + "source_code": "class AggregateProfile(object):\n\n def __init__(self, profile_datum):\n self.total_op_time = profile_datum.op_time\n self.total_exec_time = profile_datum.exec_time\n device_and_node = '%s:%s' % (profile_datum.device_name, profile_datum.node_exec_stats.node_name)\n self._node_to_exec_count = {device_and_node: 1}\n\n def add(self, profile_datum):\n self.total_op_time += profile_datum.op_time\n self.total_exec_time += profile_datum.exec_time\n device_and_node = '%s:%s' % (profile_datum.device_name, profile_datum.node_exec_stats.node_name)\n device_and_node = '%s:%s' % (profile_datum.device_name, profile_datum.node_exec_stats.node_name)\n if device_and_node in self._node_to_exec_count:\n self._node_to_exec_count[device_and_node] += 1\n else:\n self._node_to_exec_count[device_and_node] = 1\n\n @property\n def node_count(self):\n return len(self._node_to_exec_count)\n\n @property\n def node_exec_count(self):\n return sum(self._node_to_exec_count.values())", + "docstring": "Profile summary data for aggregating a number of ProfileDatum.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\profiling.py", + "ast_data": "ClassDef name:AggregateProfile FunctionDef name:__init__ arg:self arg:profile_datum arguments arg arg Assign Assign Assign Assign FunctionDef name:add arg:self arg:profile_datum arguments arg arg Assign Assign If Compare Assign FunctionDef name:node_count arg:self arguments arg Return return:yes Call FunctionDef name:node_exec_count arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "slice", + "source_code": "def slice(self, begin, end):\n if begin < 0 or end < 0:\n raise ValueError('Encountered negative index.')\n lines = self.lines[begin:end]\n font_attr_segs = {}\n for key in self.font_attr_segs:\n if key >= begin and key < end:\n font_attr_segs[key - begin] = self.font_attr_segs[key]\n annotations = {}\n for key in self.annotations:\n if not isinstance(key, int):\n annotations[key] = self.annotations[key]\n elif key >= begin and key < end:\n annotations[key - begin] = self.annotations[key]\n return RichTextLines(lines, font_attr_segs=font_attr_segs, annotations=annotations)", + "docstring": "Slice a RichTextLines object. The object itself is not changed. A sliced instance is returned. Args: begin: (int) Beginning line index (inclusive). Must be >= 0. end: (int) Ending line index (exclusive). Must be >= 0. Returns: (RichTextLines) Sliced output instance of RichTextLines. 
Raises: ValueError: If begin or end is negative.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", + "ast_data": "FunctionDef name:slice arg:self arg:begin arg:end arguments arg arg arg If BoolOp Compare Compare Raise Call Assign Assign For If BoolOp Compare Compare Assign Assign For If Call Assign If BoolOp Compare Compare Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "device_pixel_ratio", + "source_code": "@property\ndef device_pixel_ratio(self):\n return self._device_pixel_ratio", + "docstring": "The ratio of physical to logical pixels used for the canvas on screen. By default, this is 1, meaning physical and logical pixels are the same size. Subclasses that support High DPI screens may set this property to indicate that said ratio is different. All Matplotlib interaction, unless working directly with the canvas, remains in logical pixels.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:device_pixel_ratio arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_3d_properties", + "source_code": "def set_3d_properties(self, zs, zdir, axlim_clip=False):\n self.update_scalarmappable()\n offsets = self.get_offsets()\n if len(offsets) > 0:\n xs, ys = offsets.T\n else:\n xs = []\n ys = []\n self._zdir = zdir\n self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)\n self._sizes3d = self._sizes\n self._linewidths3d = np.array(self._linewidths)\n xs, ys, zs = self._offsets3d\n self._z_markers_idx = slice(-1)\n self._vzs = None\n self._axlim_clip = axlim_clip\n self.stale = True", + "docstring": "Set the *z* positions and direction of the paths. Parameters ---------- zs : float or array of floats The location or locations to place the paths in the collection along the *zdir* axis. zdir : {'x', 'y', 'z'} Plane to plot paths orthogonal to. All paths must have the same direction. See for a description of the values. axlim_clip : bool, default: False Whether to hide paths with a vertex outside the axes view limits. .. versionadded:: 3.10", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:set_3d_properties arg:self arg:zs arg:zdir arg:axlim_clip arguments arg arg arg arg Call Assign Call If Compare Call Assign Assign Assign Assign Assign Call Call Assign Assign Call Assign Assign Call Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "from_concrete_functions", + "source_code": "@classmethod\ndef from_concrete_functions(cls, funcs, trackable_obj=None):\n TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.TF_CONCRETE_FUNCTIONS)\n if trackable_obj is None:\n logging.warning('Please consider providing the trackable_obj argument in the from_concrete_functions. Providing without the trackable_obj argument is deprecated and it will use the deprecated conversion path.')\n for func in funcs:\n if not isinstance(func, _function.ConcreteFunction):\n message = 'This function takes in a list of ConcreteFunction.'\n if isinstance(func, _def_function.Function):\n message += ' To get the ConcreteFunction from a Function, call get_concrete_function.'\n raise ValueError(message)\n return cls(funcs, trackable_obj)", + "docstring": "Creates a TFLiteConverter object from ConcreteFunctions. Args: funcs: List of TensorFlow ConcreteFunctions. The list should not contain duplicate elements. 
Currently converter can only convert a single ConcreteFunction. Converting multiple functions is under development. trackable_obj: An object (typically ) associated with . A reference to this object needs to be maintained so that Variables do not get garbage collected since functions have a weak reference to Variables. Returns: TFLiteConverter object. Raises: Invalid input type.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:from_concrete_functions arg:cls arg:funcs arg:trackable_obj arguments arg arg arg Call If Compare Call For If Call Assign If Call Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "log_first_n", + "source_code": "@tf_export(v1=['logging.log_first_n'])\ndef log_first_n(level, msg, n, *args):\n count = _GetNextLogCountPerToken(_GetFileAndLine())\n log_if(level, msg, count < n, *args)", + "docstring": "Log 'msg % args' at level 'level' only first 'n' times. Not threadsafe. Args: level: The level at which to log. msg: The message to be logged. n: The number of times this should be called before it is logged. *args: The args to be substituted into the msg.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py", + "ast_data": "FunctionDef name:log_first_n arg:level arg:msg arg:n arguments arg arg arg arg Assign Call Call Call Compare Call" + }, + { + "library": "django", + "name": "__acall__", + "source_code": "async def __acall__(self, request):\n response = None\n if hasattr(self, 'process_request'):\n response = await sync_to_async(self.process_request, thread_sensitive=True)(request)\n response = response or await self.get_response(request)\n if hasattr(self, 'process_response'):\n response = await sync_to_async(self.process_response, thread_sensitive=True)(request, response)\n return response", + "docstring": "Async version of __call__ that is swapped in when an async request is running.", + "type": "method", + "file_path": "django\\django\\utils\\deprecation.py", + "ast_data": "AsyncFunctionDef name:__acall__ arg:self arg:request arguments arg arg Assign If Call Assign Call Call Assign BoolOp Call If Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "remainder", + "source_code": "def remainder(self, x0: T, x1: T) -> T:\n raise NotImplementedError", + "docstring": "Python-style modulus, take sign from RHS (x1).", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", + "ast_data": "FunctionDef name:remainder arg:self arg:x0 arg:x1 arguments arg arg arg Raise" + }, + { + "library": "tensorflow", + "name": "wait_for_stop", + "source_code": "def wait_for_stop(self, timeout=None):\n return self._stop_event.wait(timeout)", + "docstring": "Wait till the Coordinator is told to stop. Args: timeout: Float. Sleep for up to that many seconds waiting for should_stop() to become True. 
Returns: True if the Coordinator is told stop, False if the timeout expired.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py", + "ast_data": "FunctionDef name:wait_for_stop arg:self arg:timeout arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "start", + "source_code": "def start(self):\n with self._process_lock:\n if self._processes:\n raise ValueError('MultiProcessRunner already started.')\n if self._joined:\n raise ValueError('cannot start new processes afterMultiProcessRunner.join() is called')\n for task_type, addresses in self._cluster_spec.items():\n for task_id, _ in enumerate(addresses):\n self._start_subprocess_and_reading_thread(task_type, task_id)\n if self._max_run_time is not None:\n\n def handler(signum, frame):\n del signum, frame\n self.terminate_all()\n signal.signal(signal.SIGALRM, handler)\n signal.alarm(self._max_run_time)", + "docstring": "Starts processes, one for each task in . Note that this is best effort by the applicable multiprocessing library, and it may take up to seconds for a subprocess to be successfully started.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py", + "ast_data": "FunctionDef name:start arg:self arguments arg With If Raise Call If Raise Call For Call For Call Call If Compare FunctionDef name:handler arg:signum arg:frame arguments arg arg Call Call Call" + }, + { + "library": "kornia", + "name": "connected_components", + "source_code": "def connected_components(image: Tensor, num_iterations: int=100) -> Tensor:\n if not isinstance(image, Tensor):\n raise TypeError(f'Input imagetype is not a Tensor. Got: {type(image)}')\n if not isinstance(num_iterations, int) or num_iterations < 1:\n raise TypeError('Input num_iterations must be a positive integer.')\n if len(image.shape) < 3 or image.shape[-3] != 1:\n raise ValueError(f'Input image shape must be (*,1,H,W). Got: {image.shape}')\n H, W = image.shape[-2:]\n image_view = image.view(-1, 1, H, W)\n mask = image_view == 1\n B, _, _, _ = image_view.shape\n out = torch.arange(1, B * H * W + 1, device=image.device, dtype=image.dtype).view((-1, 1, H, W))\n out[~mask] = 0\n for _ in range(num_iterations):\n out = F.max_pool2d(out, kernel_size=3, stride=1, padding=1)\n out = torch.mul(out, mask)\n return out.view_as(image)", + "docstring": "Compute the Connected-component labelling (CCL) algorithm. .. image:: The implementation is an adaptation of the following repository: .. warning:: This is an experimental API subject to changes and optimization improvements. .. note:: See a working example __. Args: image: the binarized input image with shape :math:. The image must be in floating point with range [0, 1]. num_iterations: the number of iterations to make the algorithm to converge. Return: The labels image with the same shape of the input image. 
Example: >>> img = torch.rand(2, 1, 4, 5) >>> img_labels = connected_components(img, num_iterations=100)", + "type": "function", + "file_path": "kornia\\kornia\\contrib\\connected_components.py", + "ast_data": "FunctionDef name:connected_components arg:image arg:num_iterations arguments arg arg If Call Raise Call Call If BoolOp Call Compare Raise Call If BoolOp Compare Call Compare Raise Call Assign Assign Call Assign Compare Assign Assign Call Call Assign For Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "add_directive_to_domain", + "source_code": "def add_directive_to_domain(self, domain: str, name: str, cls: type[Directive], override: bool=False) -> None:\n self.registry.add_directive_to_domain(domain, name, cls, override=override)", + "docstring": "Register a Docutils directive in a domain. Like :meth:, but the directive is added to the domain named *domain*. :param domain: The name of target domain :param name: A name of directive :param cls: A directive class :param override: If false, do not install it if another directive is already installed as the same name If true, unconditionally install the directive. .. versionadded:: 1.0 .. versionchanged:: 1.8 Add *override* keyword.", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:add_directive_to_domain arg:self arg:domain arg:name arg:cls arg:override arguments arg arg arg arg arg Call" + }, + { + "library": "tensorflow", + "name": "_maybe_assert_valid_sample", + "source_code": "def _maybe_assert_valid_sample(self, counts):\n if not self.validate_args:\n return counts\n counts = distribution_util.embed_check_nonnegative_integer_form(counts)\n return control_flow_ops.with_dependencies([check_ops.assert_equal(self.total_count, math_ops.reduce_sum(counts, -1), message='counts last-dimension must sum to `self.total_count`')], counts)", + "docstring": "Check counts for proper shape, values, then return tensor version.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet_multinomial.py", + "ast_data": "FunctionDef name:_maybe_assert_valid_sample arg:self arg:counts arguments arg arg If Return return:yes Assign Call Return return:yes Call Call Call" + }, + { + "library": "kornia", + "name": "select_seeds", + "source_code": "def select_seeds(dist1: Tensor, R1: Union[float, Tensor], scores1: Tensor, fnn12: Tensor, mnn: Optional[Tensor]) -> Tuple[Tensor, Tensor]:\n im1neighmap = dist1 < R1 ** 2\n im1scorescomp = scores1.unsqueeze(1) > scores1.unsqueeze(0)\n if mnn is not None:\n im1bs = ~torch.any(im1neighmap & im1scorescomp & mnn.unsqueeze(0), dim=1) & mnn & (scores1 < 0.8 ** 2)\n else:\n im1bs = ~torch.any(im1neighmap & im1scorescomp, dim=1) & (scores1 < 0.8 ** 2)\n im1seeds = where(im1bs)[0]\n im2seeds = fnn12[im1bs]\n return (im1seeds, im2seeds)", + "docstring": "Select seed correspondences among the set of available matches. dist1: Precomputed distance matrix between keypoints in image I_1 R1: Base radius of neighborhoods in image I_1 scores1: Confidence scores on the putative_matches. Usually holds Lowe's ratio scores. fnn12: Matches between keypoints of I_1 and I_2. The i-th entry of fnn12 is j if and only if keypoint k_i in image I_1 is matched to keypoint k_j in image I_2 mnn: A mask indicating which putative matches are also mutual nearest neighbors. See documentation on 'force_seed_mnn' in the DEFAULT_CONFIG. 
If None, it disables the mutual nearest neighbor filtering on seed point selection. Expected a bool tensor with shape (num_keypoints_in_source_image,) Returns: Indices of seed points. im1seeds: Keypoint index of chosen seeds in image I_1 im2seeds: Keypoint index of chosen seeds in image I_2", + "type": "function", + "file_path": "kornia\\kornia\\feature\\adalam\\core.py", + "ast_data": "FunctionDef name:select_seeds arg:dist1 arg:R1 arg:scores1 arg:fnn12 arg:mnn arguments arg arg arg arg arg Assign Compare Assign Compare Call Call If Compare Assign Call Call Compare Assign Call Compare Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "new_figure_manager", + "source_code": "def new_figure_manager(*args, **kwargs):\n _warn_if_gui_out_of_main_thread()\n return _get_backend_mod().new_figure_manager(*args, **kwargs)", + "docstring": "Create a new figure manager instance.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:new_figure_manager arguments arg arg Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "values", + "source_code": "def values(self):\n for key in self:\n yield self[key]", + "docstring": "Yield the last value on every key list.", + "type": "method", + "file_path": "django\\django\\utils\\datastructures.py", + "ast_data": "FunctionDef name:values arg:self arguments arg For" + }, + { + "library": "pandas", + "name": "add_memory_usage_line", + "source_code": "def add_memory_usage_line(self) -> None:\n self._lines.append(f'memory usage: {self.memory_usage_string}')", + "docstring": "Add line containing memory usage.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:add_memory_usage_line arg:self arguments arg Call" + }, + { + "library": "django", + "name": "get_user_permissions", + "source_code": "def get_user_permissions(self, user_obj, obj=None):\n return self._get_permissions(user_obj, obj, 'user')", + "docstring": "Return a set of permission strings the user has from their .", + "type": "method", + "file_path": "django\\django\\contrib\\auth\\backends.py", + "ast_data": "FunctionDef name:get_user_permissions arg:self arg:user_obj arg:obj arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_session", + "source_code": "def get_session(op_input_list=()):\n session = _get_session(op_input_list)\n if not _MANUAL_VAR_INIT:\n with session.graph.as_default():\n _initialize_variables(session)\n return session", + "docstring": "Returns the TF session to be used by the backend. If a default TensorFlow session is available, we will return it. Else, we will return the global Keras session assuming it matches the current graph. If no global Keras session exists at this point: we will create a new global session. Note that you can manually set the global session via . Args: op_input_list: An option sequence of tensors or ops, which will be used to determine the current graph. Otherwise the default graph will be used. 
Returns: A TensorFlow session.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:get_session arg:op_input_list arguments arg Assign Call If With Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "is_cupy_array", + "source_code": "def is_cupy_array(x: object) -> bool:\n cls = cast(Hashable, type(x))\n return _issubclass_fast(cls, 'cupy', 'ndarray')", + "docstring": "Return True if is a CuPy array. This function does not import CuPy if it has not already been imported and is therefore cheap to use. This also returns True for subclasses and CuPy scalar objects. See Also -------- array_namespace is_array_api_obj is_numpy_array is_torch_array is_ndonnx_array is_dask_array is_jax_array is_pydata_sparse_array", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py", + "ast_data": "FunctionDef name:is_cupy_array arg:x arguments arg Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_convert_node_paths_to_ints", + "source_code": "def _convert_node_paths_to_ints(self):\n if self._node_filters is None:\n return None\n path_to_int = {}\n for node_id in self._node_filters:\n int_node_id = None\n if isinstance(node_id, str):\n node_path = node_id.split('.')\n if node_path[0] != 'root':\n raise ValueError(f'When passing string identifiers to node_filters, the first name must be root. Received {node_path[0]}.')\n int_node_id = 0\n for n, name in enumerate(node_path[1:]):\n int_node_id = self._find_node_child(int_node_id, name, '.'.join(node_path[:n + 2]))\n path_to_int[node_id] = int_node_id\n else:\n raise TypeError('Elements in node_filters must be strings.')\n return path_to_int", + "docstring": "Maps all string node paths in node_filters to the int node ids.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py", + "ast_data": "FunctionDef name:_convert_node_paths_to_ints arg:self arguments arg If Compare Return return:no Assign For Assign If Call Assign Call If Compare Raise Call Assign For Call Assign Call Call Assign Raise Call Return return:yes" + }, + { + "library": "cryptography", + "name": "add_revoked_certificate", + "source_code": "def add_revoked_certificate(self, revoked_certificate: RevokedCertificate) -> CertificateRevocationListBuilder:\n if not isinstance(revoked_certificate, RevokedCertificate):\n raise TypeError('Must be an instance of RevokedCertificate')\n return CertificateRevocationListBuilder(self._issuer_name, self._last_update, self._next_update, self._extensions, [*self._revoked_certificates, revoked_certificate])", + "docstring": "Adds a revoked certificate to the CRL.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\x509\\base.py", + "ast_data": "FunctionDef name:add_revoked_certificate arg:self arg:revoked_certificate arguments arg arg If Call Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "inbound_nodes", + "source_code": "@property\n@doc_controls.do_not_doc_inheritable\ndef inbound_nodes(self):\n return self._inbound_nodes", + "docstring": "Deprecated, do NOT use! 
Only for compatibility with external Keras.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:inbound_nodes arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, input: Tensor, params: Optional[List[PatchParamItem]]=None) -> Tensor:\n if isinstance(input, (tuple,)):\n raise ValueError('tuple input is not currently supported.')\n if params is None:\n params = self.forward_parameters(input.shape)\n output = self.transform_inputs(input, params=params)\n self._params = params\n return output", + "docstring": "Input transformation will be returned if input is a tuple.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\patch.py", + "ast_data": "FunctionDef name:forward arg:self arg:input arg:params arguments arg arg arg If Call Raise Call If Compare Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "full_name_node", + "source_code": "def full_name_node(name, ctx=ast.Load()):\n names = name.split('.')\n names.reverse()\n node = ast.Name(id=names.pop(), ctx=ast.Load())\n while names:\n node = ast.Attribute(value=node, attr=names.pop(), ctx=ast.Load())\n node.ctx = ctx\n return node", + "docstring": "Make an Attribute or Name node for name. Translate a qualified name into nested Attribute nodes (and a Name node). Args: name: The name to translate to a node. ctx: What context this name is used in. Defaults to Load() Returns: A Name or Attribute node.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py", + "ast_data": "FunctionDef name:full_name_node arg:name arg:ctx arguments arg arg Call Assign Call Call Assign Call Call Call While Assign Call Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "probs", + "source_code": "@property\ndef probs(self):\n return self._probs", + "docstring": "Vector of coordinatewise probabilities.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\categorical.py", + "ast_data": "FunctionDef name:probs arg:self arguments arg Return return:yes" + }, + { + "library": "seaborn", + "name": "_point_kwargs_backcompat", + "source_code": "def _point_kwargs_backcompat(self, scale, join, kwargs):\n if scale is not deprecated:\n lw = mpl.rcParams['lines.linewidth'] * 1.8 * scale\n mew = lw * 0.75\n ms = lw * 2\n msg = '\\n\\nThe `scale` parameter is deprecated and will be removed in v0.15.0. 
You can now control the size of each plot element using matplotlib `Line2D` parameters (e.g., `linewidth`, `markersize`, etc.).\\n'\n warnings.warn(msg, stacklevel=3)\n kwargs.update(linewidth=lw, markeredgewidth=mew, markersize=ms)\n if join is not deprecated:\n msg = '\\n\\nThe `join` parameter is deprecated and will be removed in v0.15.0.'\n if not join:\n msg += \" You can remove the line between points with `linestyle='none'`.\"\n kwargs.update(linestyle='')\n msg += '\\n'\n warnings.warn(msg, stacklevel=3)", + "docstring": "Provide two cycles where scale= and join= work, but redirect to kwargs.", + "type": "method", + "file_path": "seaborn\\seaborn\\categorical.py", + "ast_data": "FunctionDef name:_point_kwargs_backcompat arg:self arg:scale arg:join arg:kwargs arguments arg arg arg arg If Compare Assign Assign Assign Assign Call Call If Compare Assign If Call Call" + }, + { + "library": "kornia", + "name": "quaternion_log_to_exp", + "source_code": "def quaternion_log_to_exp(quaternion: Tensor, eps: float=1e-08) -> Tensor:\n if not isinstance(quaternion, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(quaternion)}')\n if not quaternion.shape[-1] == 3:\n raise ValueError(f'Input must be a tensor of shape (*, 3). Got {quaternion.shape}')\n norm_q: Tensor = torch.norm(quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps)\n quaternion_vector: Tensor = quaternion * sin(norm_q) / norm_q\n quaternion_scalar: Tensor = cos(norm_q)\n quaternion_exp: Tensor = tensor([])\n quaternion_exp = concatenate((quaternion_scalar, quaternion_vector), dim=-1)\n return quaternion_exp", + "docstring": "Apply exponential map to log quaternion. The quaternion should be in (w, x, y, z) format. Args: quaternion: a tensor containing a quaternion to be converted. The tensor can be of shape :math:. eps: a small number for clamping. Return: the quaternion exponential map of shape :math:. Example: >>> quaternion = tensor((0., 0., 0.)) >>> quaternion_log_to_exp(quaternion, eps=torch.finfo(quaternion.dtype).eps) tensor([1., 0., 0., 0.])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:quaternion_log_to_exp arg:quaternion arg:eps arguments arg arg If Call Raise Call Call If Compare Raise Call Call Call Call Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "bessel_i0", + "source_code": "@tf_export('math.bessel_i0', 'math.special.bessel_i0')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef bessel_i0(x, name=None):\n with ops.name_scope(name, 'bessel_i0', [x]):\n return gen_special_math_ops.bessel_i0(x)", + "docstring": "Computes the Bessel i0 function of element-wise. Modified Bessel function of order 0. It is preferable to use the numerically stabler function instead. >>> tf.math.special.bessel_i0([-1., -0.5, 0.5, 1.]).numpy() array([1.26606588, 1.06348337, 1.06348337, 1.26606588], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . 
@compatibility(scipy) Equivalent to scipy.special.i0 @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py", + "ast_data": "FunctionDef name:bessel_i0 arg:x arg:name arguments arg arg With Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_hypersphere_volume_sample", + "source_code": "def _hypersphere_volume_sample(self, center: np.ndarray, radius: DecimalNumber, candidates: IntNumber=1) -> np.ndarray:\n x = self.rng.standard_normal(size=(candidates, self.d))\n ssq = np.sum(x ** 2, axis=1)\n fr = radius * gammainc(self.d / 2, ssq / 2) ** (1 / self.d) / np.sqrt(ssq)\n fr_tiled = np.tile(fr.reshape(-1, 1), (1, self.d))\n p = center + np.multiply(x, fr_tiled)\n return p", + "docstring": "Uniform sampling within hypersphere.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_qmc.py", + "ast_data": "FunctionDef name:_hypersphere_volume_sample arg:self arg:center arg:radius arg:candidates arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "convert_shape_to_symint", + "source_code": "def convert_shape_to_symint(lst: Iterable[Union[int, sympy.Expr]]) -> list[Union[int, torch.SymInt]]:\n return [convert_to_symint(i) for i in lst]", + "docstring": "Takes a list of shapes from Inductor and converts them into symints (or just ints if all shapes are static).", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\utils.py", + "ast_data": "FunctionDef name:convert_shape_to_symint arg:lst arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "to_numpy", + "source_code": "def to_numpy(self, dtype: npt.DTypeLike | None=None, copy: bool=False, na_value: object=lib.no_default) -> np.ndarray:\n result = np.asarray(self, dtype=dtype)\n if copy or na_value is not lib.no_default:\n result = result.copy()\n if na_value is not lib.no_default:\n result[self.isna()] = na_value\n return result", + "docstring": "Convert to a NumPy ndarray. This is similar to :meth:, but may provide additional control over how the conversion is done. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:. copy : bool, default False Whether to ensure that the returned value is a not a view on another array. Note that `dtype` and the type of the array. Returns ------- numpy.ndarray", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:to_numpy arg:self arg:dtype arg:copy arg:na_value arguments arg arg arg arg Assign Call If BoolOp Compare Assign Call If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "LayerRNNCell", + "source_code": "class LayerRNNCell(RNNCell):\n\n def __call__(self, inputs, state, scope=None, *args, **kwargs):\n return base_layer.Layer.__call__(self, inputs, state, *args, scope=scope, **kwargs)", + "docstring": "Subclass of RNNCells that act like proper objects. For backwards compatibility purposes, most instances allow their methods to instantiate variables via . The underlying variable scope thus keeps track of any variables, and returning cached versions. This is atypical of objects, which separate this part of layer building into a method that is only called once. Here we provide a subclass for objects that act exactly as objects do. 
They must provide a method and their methods do not access Variables .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py", + "ast_data": "ClassDef name:LayerRNNCell FunctionDef name:__call__ arg:self arg:inputs arg:state arg:scope arguments arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_flush_tensor_values_cache", + "source_code": "def _flush_tensor_values_cache(self, tensor_fetches, op_fetches, on_tpu, tensor_trace_order, graph):\n if not tensor_trace_order.traced_tensors:\n logging.warn('No tensor values being traced. No flush cache op added.')\n return tensor_fetches\n with ops.control_dependencies(op_fetches + [tensor.op for tensor in tensor_fetches]):\n flush_cache_op = self._generate_flush_cache_op(self._tt_config.num_replicas, on_tpu, tensor_trace_order, graph)\n return control_flow_ops.tuple(tensor_fetches, control_inputs=[flush_cache_op])", + "docstring": "Flushes the intermediate tensor values in the graph to the cache. Args: tensor_fetches: list of tensor results returned by the model_fn. op_fetches: list of ops that are returned by the model_fn, e.g., train_op. on_tpu: if the graph is executed on TPU. tensor_trace_order: TensorTraceOrder object holding tensorname to id map. graph: TensorFlow graph. Returns: An identical copy of tensor_fetches.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:_flush_tensor_values_cache arg:self arg:tensor_fetches arg:op_fetches arg:on_tpu arg:tensor_trace_order arg:graph arguments arg arg arg arg arg arg If Call Return return:yes With Call Assign Call Return return:yes Call" + }, + { + "library": "seaborn", + "name": "diverging_palette", + "source_code": "def diverging_palette(h_neg, h_pos, s=75, l=50, sep=1, n=6, center='light', as_cmap=False):\n palfunc = dict(dark=dark_palette, light=light_palette)[center]\n n_half = int(128 - sep // 2)\n neg = palfunc((h_neg, s, l), n_half, reverse=True, input='husl')\n pos = palfunc((h_pos, s, l), n_half, input='husl')\n midpoint = dict(light=[(0.95, 0.95, 0.95)], dark=[(0.133, 0.133, 0.133)])[center]\n mid = midpoint * sep\n pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap)\n return pal", + "docstring": "Make a diverging palette between two HUSL colors. If you are using the IPython notebook, you can also choose this palette interactively with the :func: function. Parameters ---------- h_neg, h_pos : float in [0, 359] Anchor hues for negative and positive extents of the map. s : float in [0, 100], optional Anchor saturation for both extents of the map. l : float in [0, 100], optional Anchor lightness for both extents of the map. sep : int, optional Size of the intermediate region. n : int, optional Number of colors in the palette (if not returning a cmap) center : {\"light\", \"dark\"}, optional Whether the center of the palette is light or dark as_cmap : bool, optional If True, return a :class:. Returns ------- palette list of RGB tuples or :class: See Also -------- dark_palette : Create a sequential palette with dark values. light_palette : Create a sequential palette with light values. Examples -------- .. 
include: ../docstrings/diverging_palette.rst", + "type": "function", + "file_path": "seaborn\\seaborn\\palettes.py", + "ast_data": "FunctionDef name:diverging_palette arg:h_neg arg:h_pos arg:s arg:l arg:sep arg:n arg:center arg:as_cmap arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Call Return return:yes" + }, + { + "library": "seaborn", + "name": "get_color_cycle", + "source_code": "def get_color_cycle():\n cycler = mpl.rcParams['axes.prop_cycle']\n return cycler.by_key()['color'] if 'color' in cycler.keys else ['.15']", + "docstring": "Return the list of colors in the current matplotlib color cycle Parameters ---------- None Returns ------- colors : list List of matplotlib colors in the current cycle, or dark gray if the current color cycle is empty.", + "type": "function", + "file_path": "seaborn\\seaborn\\utils.py", + "ast_data": "FunctionDef name:get_color_cycle arguments Assign Return return:yes Compare Call" + }, + { + "library": "pytorch", + "name": "reduce_acc_nodes_non_tensor_input_helper", + "source_code": "def reduce_acc_nodes_non_tensor_input_helper(self, cpu_worklist: NodeList):\n while cpu_worklist:\n node = cpu_worklist.pop(0)\n for user in node.users:\n if user in self.acc_nodes:\n self.acc_nodes.remove(user)\n if not is_node_output_tensor(user):\n cpu_worklist.append(user)", + "docstring": "Transitively excludes nodes from ACC supported set. For every node in the worklist: - removes its downstream ACC nodes from ACC supported set, - if any downstream ACC node produces non-tensor output, then it gets added into the worklist.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py", + "ast_data": "FunctionDef name:reduce_acc_nodes_non_tensor_input_helper arg:self arg:cpu_worklist arguments arg arg While Assign Call For If Compare Call If Call Call" + }, + { + "library": "pytorch", + "name": "module_load", + "source_code": "def module_load(self, other, assign=False):\n if has_torch_function_variadic(self, other):\n return handle_torch_function(Tensor.module_load, (self, other), self, other, assign=assign)\n if assign:\n return other.detach()\n else:\n return self.copy_(other).detach()", + "docstring": "Defines how to transform `~nn.Module.load_state_dict~torch.__future__.get_swap_module_params_on_conversion~torch.utils.swap_tensors~nn.Module.load_state_dictnn.Module.load_state_dict`", + "type": "method", + "file_path": "pytorch\\torch\\_tensor.py", + "ast_data": "FunctionDef name:module_load arg:self arg:other arg:assign arguments arg arg arg If Call Return return:yes Call If Return return:yes Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "OverlapsRightLookup", + "source_code": "@BaseSpatialField.register_lookup\nclass OverlapsRightLookup(GISLookup):\n lookup_name = 'overlaps_right'", + "docstring": "The 'overlaps_right' operator returns true if A's bounding box overlaps or is to the right of B's bounding box.", + "type": "class", + "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py", + "ast_data": "ClassDef name:OverlapsRightLookup Assign" + }, + { + "library": "pandas", + "name": "_center_window", + "source_code": "def _center_window(self, result: np.ndarray, offset: int) -> np.ndarray:\n if offset > 0:\n lead_indexer = [slice(offset, None)]\n result = np.copy(result[tuple(lead_indexer)])\n return result", + "docstring": "Center the result in the window for weighted rolling aggregations.", + "type": "method", + "file_path": 
"pandas\\pandas\\core\\window\\rolling.py", + "ast_data": "FunctionDef name:_center_window arg:self arg:result arg:offset arguments arg arg arg If Compare Assign Call Assign Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "visit", + "source_code": "def visit(self, node: ast.AST) -> None:\n super().visit(node)\n self.previous = node", + "docstring": "Updates self.previous to the given node.", + "type": "method", + "file_path": "sphinx\\sphinx\\pycode\\parser.py", + "ast_data": "FunctionDef name:visit arg:self arg:node arguments arg arg Call Call Assign" + }, + { + "library": "scikit-learn", + "name": "fit_transform", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y=None):\n return self._transform(X, fitting=True)", + "docstring": "Learn a list of feature name -> indices mappings and transform X. Like fit(X) followed by transform(X), but does not require materializing X in memory. Parameters ---------- X : Mapping or iterable over Mappings Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). .. versionchanged:: 0.24 Accepts multiple string values for one categorical feature. y : (ignored) Ignored parameter. Returns ------- Xa : {array, sparse matrix} Feature vectors; always 2-d.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_extraction\\_dict_vectorizer.py", + "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "PolymorphicFunction", + "source_code": "@tf_export('types.experimental.PolymorphicFunction', 'types.experimental.GenericFunction', v1=[])\nclass PolymorphicFunction(Callable, metaclass=abc.ABCMeta):\n\n @abc.abstractmethod\n def get_concrete_function(self, *args, **kwargs) -> ConcreteFunction:\n pass\n\n def experimental_get_compiler_ir(self, *args, **kwargs):\n pass", + "docstring": "Base class for polymorphic graph functions. Graph functions are Python callable objects that dispatch calls to a TensorFlow graph. Polymorphic graph functions can be backed by multiple TF graphs, and automatically select the appropriate specialization based on the type of input they were called with. They may also create specializations on the fly if necessary, for example by tracing. 
Also see .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\types\\core.py", + "ast_data": "ClassDef name:PolymorphicFunction FunctionDef name:get_concrete_function arg:self arguments arg arg arg FunctionDef name:experimental_get_compiler_ir arg:self arguments arg arg arg Call" + }, + { + "library": "tensorflow", + "name": "_batch_rp_spec_head", + "source_code": "def _batch_rp_spec_head(old_head: RowPartitionSpec, batch_size: Optional[int]) -> RowPartitionSpec:\n nvals = None if old_head.nrows is None or batch_size is None else batch_size * old_head.nrows\n return RowPartitionSpec(nrows=batch_size, nvals=nvals, uniform_row_length=old_head.nrows, dtype=old_head.dtype)", + "docstring": "Creates a RowPartitionSpec representing the new dimension created.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:_batch_rp_spec_head arg:old_head arg:batch_size arguments arg arg Assign BoolOp Compare Compare Return return:yes Call" + }, + { + "library": "django", + "name": "uses_datetime_field", + "source_code": "@cached_property\ndef uses_datetime_field(self):\n model = self.get_queryset().model if self.model is None else self.model\n field = model._meta.get_field(self.get_date_field())\n return isinstance(field, models.DateTimeField)", + "docstring": "Return if the date field is a and if it's a .", + "type": "method", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "FunctionDef name:uses_datetime_field arg:self arguments arg Assign Compare Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "assert_static", + "source_code": "def assert_static(self, val):\n assert not val.is_dynamic(), 'expected static but got dynamic (run with TORCH_LOGS=dynamic for more info)'", + "docstring": "Asserts that the int is static (and not dynamic, per dynamic shapes)", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\comptime.py", + "ast_data": "FunctionDef name:assert_static arg:self arg:val arguments arg arg Call" + }, + { + "library": "scipy", + "name": "_with_data", + "source_code": "def _with_data(self, data, copy=True):\n if copy:\n return self.__class__((data, self.indices.copy(), self.indptr.copy()), shape=self.shape, dtype=data.dtype)\n else:\n return self.__class__((data, self.indices, self.indptr), shape=self.shape, dtype=data.dtype)", + "docstring": "Returns a matrix with the same sparsity structure as self, but with different data. By default the structure arrays (i.e. .indptr and .indices) are copied.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_compressed.py", + "ast_data": "FunctionDef name:_with_data arg:self arg:data arg:copy arguments arg arg arg If Return return:yes Call Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "draw_image", + "source_code": "def draw_image(self, gc, x, y, im, transform=None):\n raise NotImplementedError", + "docstring": "Draw an RGBA image. Parameters ---------- gc : A graphics context with clipping information. x : float The distance in physical units (i.e., dots or pixels) from the left hand side of the canvas. y : float The distance in physical units (i.e., dots or pixels) from the bottom side of the canvas. im : (N, M, 4) array of An array of RGBA pixels. transform : If and only if the concrete backend is written such that returns `.Affine2DBase~.RendererBase.draw_image`. 
The translation vector of the transformation is given in physical units (i.e., dots or pixels). Note that the transformation does not override *x* and *y*, and has to be applied *before* translatingthe result by *x* and *y* (this can be accomplished by adding *x* and *y* to the translation vector defined by *transform*).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:draw_image arg:self arg:gc arg:x arg:y arg:im arg:transform arguments arg arg arg arg arg arg Raise" + }, + { + "library": "tensorflow", + "name": "ScopedTFStatus", + "source_code": "class ScopedTFStatus(object):\n __slots__ = ['status']\n\n def __init__(self):\n self.status = c_api.TF_NewStatus()\n\n def __del__(self):\n if c_api is not None and c_api.TF_DeleteStatus is not None:\n c_api.TF_DeleteStatus(self.status)", + "docstring": "Wrapper around TF_Status that handles deletion.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\c_api_util.py", + "ast_data": "ClassDef name:ScopedTFStatus Assign FunctionDef name:__init__ arg:self arguments arg Assign Call FunctionDef name:__del__ arg:self arguments arg If BoolOp Compare Compare Call" + }, + { + "library": "tensorflow", + "name": "TensorShapeProtoToList", + "source_code": "def TensorShapeProtoToList(shape):\n return [dim.size for dim in shape.dim]", + "docstring": "Convert a TensorShape to a list. Args: shape: A TensorShapeProto. Returns: List of integers representing the dimensions of the tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_util.py", + "ast_data": "FunctionDef name:TensorShapeProtoToList arg:shape arguments arg Return return:yes" + }, + { + "library": "django", + "name": "get_template_sources", + "source_code": "def get_template_sources(self, template_name):\n raise NotImplementedError('subclasses of Loader must provide a get_template_sources() method')", + "docstring": "An iterator that yields possible matching template paths for a template name.", + "type": "method", + "file_path": "django\\django\\template\\loaders\\base.py", + "ast_data": "FunctionDef name:get_template_sources arg:self arg:template_name arguments arg arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "set_params", + "source_code": "def set_params(self, **kwargs):\n self._set_params('transformer_list', **kwargs)\n return self", + "docstring": "Set the parameters of this estimator. Valid parameter keys can be listed with `transformer_listtransform_list`. Parameters of the transformers may be set using its name and the parameter name separated by a '__'. 
Returns ------- self : object FeatureUnion class instance.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:set_params arg:self arguments arg arg Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_controller", + "source_code": "@tf_contextlib.contextmanager\ndef get_controller(self, default: T) -> Iterator[T]:\n self.stack.append(default)\n try:\n yield default\n finally:\n if self.stack:\n if self._enforce_nesting:\n if self.stack[-1] is not default:\n raise AssertionError('Nesting violated for default stack of %s objects' % type(default))\n self.stack.pop()\n else:\n self.stack.remove(default)", + "docstring": "A context manager for manipulating a default stack.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\stack.py", + "ast_data": "FunctionDef name:get_controller arg:self arg:default arguments arg arg Call Try If If If Compare Raise Call Call Call Call" + }, + { + "library": "pandas", + "name": "add_index_range_line", + "source_code": "def add_index_range_line(self) -> None:\n self._lines.append(self.data.index._summary())", + "docstring": "Add line with range of indices to the table.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:add_index_range_line arg:self arguments arg Call Call" + }, + { + "library": "numpy", + "name": "verify_matching_signatures", + "source_code": "def verify_matching_signatures(implementation, dispatcher):\n implementation_spec = ArgSpec(*getargspec(implementation))\n dispatcher_spec = ArgSpec(*getargspec(dispatcher))\n if implementation_spec.args != dispatcher_spec.args or implementation_spec.varargs != dispatcher_spec.varargs or implementation_spec.keywords != dispatcher_spec.keywords or (bool(implementation_spec.defaults) != bool(dispatcher_spec.defaults)) or (implementation_spec.defaults is not None and len(implementation_spec.defaults) != len(dispatcher_spec.defaults)):\n raise RuntimeError('implementation and dispatcher for %s have different function signatures' % implementation)\n if implementation_spec.defaults is not None:\n if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults):\n raise RuntimeError('dispatcher functions can only use None for default argument values')", + "docstring": "Verify that a dispatcher function has the right signature.", + "type": "function", + "file_path": "numpy\\numpy\\_core\\overrides.py", + "ast_data": "FunctionDef name:verify_matching_signatures arg:implementation arg:dispatcher arguments arg arg Assign Call Call Assign Call Call If BoolOp Compare Compare Compare Compare Call Call BoolOp Compare Compare Call Call Raise Call If Compare If Compare Call Raise Call" + }, + { + "library": "pytorch", + "name": "edge", + "source_code": "def edge(a, b, tie_breaker=hash):\n return supercedes(a, b) and (not supercedes(b, a) or tie_breaker(a) > tie_breaker(b))", + "docstring": "A should be checked before B Tie broken by tie_breaker, defaults to ``", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\conflict.py", + "ast_data": "FunctionDef name:edge arg:a arg:b arg:tie_breaker arguments arg arg arg Return return:yes BoolOp Call BoolOp Call Compare Call Call" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n return '{}_bucketized'.format(self.source_column.name)", + "docstring": "See base class.", + "type": "method", + "file_path": 
"tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "Treccani", + "source_code": "class Treccani(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n self.custom_bounds = [(-2, 2), (-2, 2)]\n self.global_optimum = [[-2.0, 0.0]]\n self.fglob = 0\n\n def fun(self, x, *args):\n self.nfev += 1\n return x[0] ** 4 + 4.0 * x[0] ** 3 + 4.0 * x[0] ** 2 + x[1] ** 2", + "docstring": "Treccani objective function. This class defines the Treccani [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Treccani}}(x) = x_1^4 + 4x_1^3 + 4x_1^2 + x_2^2 with :math: for :math:. *Global optimum*: :math: for :math: or :math:. .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_T.py", + "ast_data": "ClassDef name:Treccani FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "debug_draw_graph", + "source_code": "def debug_draw_graph(self) -> None:\n if os.environ.get('INDUCTOR_WRITE_SCHEDULER_GRAPH', None) == '1':\n from .debug import draw_buffers\n draw_buffers(self.nodes, print_graph=True)", + "docstring": "Generate an image of the graph for debugging", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:debug_draw_graph arg:self arguments arg If Compare Call Call" + }, + { + "library": "django", + "name": "__iter__", + "source_code": "def __iter__(self):\n for name in self.fields:\n yield self[name]", + "docstring": "Yield the form's fields as BoundField objects.", + "type": "method", + "file_path": "django\\django\\forms\\forms.py", + "ast_data": "FunctionDef name:__iter__ arg:self arguments arg For" + }, + { + "library": "tensorflow", + "name": "node_inputs", + "source_code": "def node_inputs(self, node_name, is_control=False, device_name=None):\n if not self._debug_graphs:\n raise LookupError('Node inputs are not loaded from partition graphs yet.')\n device_name = self._infer_device_name(device_name, node_name)\n if is_control:\n return self._debug_graphs[device_name].node_ctrl_inputs[node_name]\n else:\n return self._debug_graphs[device_name].node_inputs[node_name]", + "docstring": "Get the inputs of given node according to partition graphs. Args: node_name: Name of the node. is_control: () Whether control inputs, rather than non-control inputs, are to be returned. device_name: () name of the device. If there is only one device or if node_name exists on only one device, this argument is optional. Returns: ( of ) inputs to the node, as a list of node names. 
Raises: LookupError: If node inputs and control inputs have not been loaded from partition graphs yet.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:node_inputs arg:self arg:node_name arg:is_control arg:device_name arguments arg arg arg arg If Raise Call Assign Call If Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "run_on", + "source_code": "@tf_export('experimental.dtensor.run_on', v1=[])\n@deprecation.deprecated(None, 'Use `dtensor.default_mesh` scope instead.')\n@contextlib.contextmanager\ndef run_on(mesh: layout_lib.Mesh):\n with default_mesh(mesh):\n yield", + "docstring": "Runs enclosed functions in the DTensor device scope. This function returns a scope. All the ops and tf.functions in this scope will run on the DTensor device using the mesh provided. This is useful for wrapping any tf.function that doesn't take a DTensor as input but would like to produce DTensor as result. The scope will also make sure all small constants be replicated as DTensor. Args: mesh: A Mesh instance to extract a default mesh from. Yields: A context in which all ops and tf.functions will run on the DTensor device.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py", + "ast_data": "FunctionDef name:run_on arg:mesh arguments arg With Call Call Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, norm=None, cmap=None, *, colorizer=None, **kwargs):\n super().__init__(**kwargs)\n self._A = None\n self._colorizer = self._get_colorizer(colorizer=colorizer, norm=norm, cmap=cmap)\n self.colorbar = None\n self._id_colorizer = self._colorizer.callbacks.connect('changed', self.changed)\n self.callbacks = cbook.CallbackRegistry(signals=['changed'])", + "docstring": "Parameters ---------- norm : (or subclass thereof) or str or None The normalizing object which scales data, typically into the interval `str.Normalize~matplotlib.colors.Colormap` The colormap used to map normalized data values to RGBA colors.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:norm arg:cmap arguments arg arg arg arg arg Call Call Assign Assign Call Assign Assign Call Assign Call" + }, + { + "library": "django", + "name": "min_x", + "source_code": "@property\ndef min_x(self):\n return self._envelope.MinX", + "docstring": "Return the value of the minimum X coordinate.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py", + "ast_data": "FunctionDef name:min_x arg:self arguments arg Return return:yes" + }, + { + "library": "virtualenv", + "name": "can_describe", + "source_code": "@classmethod\ndef can_describe(cls, interpreter):\n return True", + "docstring": "Knows means it knows how the output will look.", + "type": "method", + "file_path": "virtualenv\\src\\virtualenv\\create\\describe.py", + "ast_data": "FunctionDef name:can_describe arg:cls arg:interpreter arguments arg arg Return return:yes" + }, + { + "library": "scipy", + "name": "hilbert", + "source_code": "def hilbert(x, _cache=_cache):\n if isinstance(_cache, threading.local):\n if not hasattr(_cache, 'hilbert_cache'):\n _cache.hilbert_cache = {}\n _cache = _cache.hilbert_cache\n tmp = asarray(x)\n if iscomplexobj(tmp):\n return hilbert(tmp.real, _cache) + 1j * hilbert(tmp.imag, _cache)\n n = len(x)\n omega = _cache.get(n)\n if omega is None:\n if len(_cache) > 
20:\n while _cache:\n _cache.popitem()\n\n def kernel(k):\n if k > 0:\n return 1.0\n elif k < 0:\n return -1.0\n return 0.0\n omega = convolve.init_convolution_kernel(n, kernel, d=1)\n _cache[n] = omega\n overwrite_x = _datacopied(tmp, x)\n return convolve.convolve(tmp, omega, swap_real_imag=1, overwrite_x=overwrite_x)", + "docstring": "Return Hilbert transform of a periodic sequence x. If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = sqrt(-1)*sign(j) * x_j y_0 = 0 Parameters ---------- x : array_like The input array, should be periodic. _cache : dict, optional Dictionary that contains the kernel used to do a convolution with. Returns ------- y : ndarray The transformed input. See Also -------- scipy.signal.hilbert : Compute the analytic signal, using the Hilbert transform. Notes ----- If `scipy.signal.hilbert` does have an extra -1 factor compared to this function.", + "type": "function", + "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py", + "ast_data": "FunctionDef name:hilbert arg:x arg:_cache arguments arg arg If Call If Call Assign Assign Assign Call If Call Return return:yes Call Call Assign Call Assign Call If Compare If Compare Call While Call FunctionDef name:kernel arg:k arguments arg If Compare Return return:yes If Compare Return return:yes Return return:yes Assign Call Assign Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "fuse_linear_bn_eval", + "source_code": "def fuse_linear_bn_eval(linear: LinearT, bn: torch.nn.modules.batchnorm._BatchNorm) -> LinearT:\n assert not (linear.training or bn.training), 'Fusion only for eval!'\n fused_linear = copy.deepcopy(linear)\n '\\n Linear-BN needs to be fused while preserving the shapes of linear weight/bias.\\n To preserve the shapes of linear weight/bias, the channel dim of bn needs to be broadcastable with the last dim of linear,\\n because bn operates over the channel dim, (N, C_in, H, W) while linear operates over the last dim, (*, H_in).\\n To be broadcastable, the number of features in bn and\\n the number of output features from linear must satisfy the following condition:\\n 1. they are equal, or\\n 2. the number of features in bn is 1\\n Otherwise, skip the folding path\\n '\n assert linear.out_features == bn.num_features or bn.num_features == 1, 'To fuse, linear.out_features == bn.num_features or bn.num_features == 1'\n assert bn.running_mean is not None and bn.running_var is not None\n fused_linear.weight, fused_linear.bias = fuse_linear_bn_weights(fused_linear.weight, fused_linear.bias, bn.running_mean, bn.running_var, bn.eps, bn.weight, bn.bias)\n return fused_linear", + "docstring": "Fuse a linear module and a BatchNorm module into a single, new linear module. Args: linear (torch.nn.Linear): A Linear module. bn (torch.nn.modules.batchnorm._BatchNorm): A BatchNorm module. Returns: torch.nn.Linear: The fused linear module. .. 
note:: Both `` must have its running buffers computed.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\fusion.py", + "ast_data": "FunctionDef name:fuse_linear_bn_eval arg:linear arg:bn arguments arg arg BoolOp Assign Call BoolOp Compare Compare BoolOp Compare Compare Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "minmax_scale", + "source_code": "@validate_params({'X': ['array-like'], 'axis': [Options(Integral, {0, 1})]}, prefer_skip_nested_validation=False)\ndef minmax_scale(X, feature_range=(0, 1), *, axis=0, copy=True):\n X = check_array(X, copy=False, ensure_2d=False, dtype=FLOAT_DTYPES, ensure_all_finite='allow-nan')\n original_ndim = X.ndim\n if original_ndim == 1:\n X = X.reshape(X.shape[0], 1)\n s = MinMaxScaler(feature_range=feature_range, copy=copy)\n if axis == 0:\n X = s.fit_transform(X)\n else:\n X = s.fit_transform(X.T).T\n if original_ndim == 1:\n X = X.ravel()\n return X", + "docstring": "Transform features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by (when `User Guide ~sklearn.preprocessing.MinMaxScaler~sklearn.preprocessing.minmax_scale~sklearn.preprocessing.MinMaxScalerPipeline pipe = make_pipeline(MinMaxScaler(), LogisticRegression())~sklearn.pipeline.Pipelinesphx_glr_auto_examples_preprocessing_plot_all_scaling.py`. Examples -------- >>> from sklearn.preprocessing import minmax_scale >>> X = [[-2, 1, 2], [-1, 0, 1]] >>> minmax_scale(X, axis=0) # scale each column independently array([[0., 1., 1.], [1., 0., 0.]]) >>> minmax_scale(X, axis=1) # scale each row independently array([[0. , 0.75, 1. ], [0. , 0.5 , 1. ]])", + "type": "function", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:minmax_scale arg:X arg:feature_range arguments arg arg arg arg Assign Call Assign If Compare Assign Call Assign Call If Compare Assign Call Assign Call If Compare Assign Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "disconnect", + "source_code": "def disconnect(self, cid):\n self._observers.disconnect(cid)", + "docstring": "Remove the observer with connection id *cid*. 
Parameters ---------- cid : int Connection id of the observer to be removed.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:disconnect arg:self arg:cid arguments arg arg Call" + }, + { + "library": "matplotlib", + "name": "get_position", + "source_code": "def get_position(self):\n return self.get_subplotspec().get_position(self.figure).bounds", + "docstring": "Return the bounds of the subplot box.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py", + "ast_data": "FunctionDef name:get_position arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "SparseDLRM", + "source_code": "class SparseDLRM(DLRM_Net):\n\n def __init__(self, **args):\n super().__init__(**args)\n\n def forward(self, dense_x, lS_o, lS_i):\n x = self.apply_mlp(dense_x, self.bot_l)\n ly = self.apply_emb(lS_o, lS_i, self.emb_l, self.v_W_l)\n z = self.interact_features(x, ly)\n z = z.to_sparse_coo()\n z = torch.mm(z, self.top_l[0].weight.T).add(self.top_l[0].bias)\n for layer in self.top_l[1:]:\n z = layer(z)\n return z", + "docstring": "The SparseDLRM model is a wrapper around the DLRM_Net model that tries to use torch.sparse tensors for the features obtained after the call. The idea is to do a simple torch.mm() with the weight matrix of the first linear layer of the top layer.", + "type": "class", + "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\dlrm_utils.py", + "ast_data": "ClassDef name:SparseDLRM FunctionDef name:__init__ arg:self arguments arg arg Call Call FunctionDef name:forward arg:self arg:dense_x arg:lS_o arg:lS_i arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call For Assign Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "regenerate", + "source_code": "def regenerate(self):\n sess = cherrypy.serving.session\n sess.regenerate()\n relevant = ('path', 'path_header', 'name', 'timeout', 'domain', 'secure')\n conf = dict(((k, v) for k, v in self._merged_args().items() if k in relevant))\n _sessions.set_response_cookie(**conf)", + "docstring": "Drop the current session and make a new one (with a new id).", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cptools.py", + "ast_data": "FunctionDef name:regenerate arg:self arguments arg Assign Call Assign Assign Call Call Call Compare Call" + }, + { + "library": "pytorch", + "name": "remove_load_call_method", + "source_code": "def remove_load_call_method(instructions: list[Instruction]) -> list[Instruction]:\n assert sys.version_info < (3, 11)\n rewrites = {'LOAD_METHOD': 'LOAD_ATTR', 'CALL_METHOD': 'CALL_FUNCTION'}\n for inst in instructions:\n if inst.opname in rewrites:\n inst.opname = rewrites[inst.opname]\n inst.opcode = dis.opmap[inst.opname]\n return instructions", + "docstring": "LOAD_METHOD puts a NULL on the stack which causes issues, so remove it", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "FunctionDef name:remove_load_call_method arg:instructions arguments arg Compare Assign For If Compare Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "set_default_mmap_options", + "source_code": "class set_default_mmap_options:\n\n def __init__(self, flags: int) -> None:\n if IS_WINDOWS:\n raise RuntimeError('Changing the default mmap options is currently not supported for Windows')\n if flags != MAP_PRIVATE and flags != 
MAP_SHARED:\n raise ValueError(f'Invalid argument in function set_default_mmap_options, expected mmap.MAP_PRIVATE or mmap.MAP_SHARED, but got {flags}')\n from torch.utils.serialization import config\n self.prev = config.load.mmap_flags\n config.load.mmap_flags = flags\n\n def __enter__(self) -> None:\n pass\n\n def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:\n from torch.utils.serialization import config\n config.load.mmap_flags = self.prev", + "docstring": "Context manager or function to set default mmap options for :func: with ``", + "type": "class", + "file_path": "pytorch\\torch\\serialization.py", + "ast_data": "ClassDef name:set_default_mmap_options FunctionDef name:__init__ arg:self arg:flags arguments arg arg If Raise Call If BoolOp Compare Compare Raise Call Assign Assign FunctionDef name:__enter__ arg:self arguments arg FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg Assign" + }, + { + "library": "tensorflow", + "name": "getfile", + "source_code": "def getfile(object):\n unwrapped_object = tf_decorator.unwrap(object)[1]\n if hasattr(unwrapped_object, 'f_globals') and '__file__' in unwrapped_object.f_globals:\n return unwrapped_object.f_globals['__file__']\n return _inspect.getfile(unwrapped_object)", + "docstring": "TFDecorator-aware replacement for inspect.getfile.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py", + "ast_data": "FunctionDef name:getfile arg:object arguments arg Assign Call If BoolOp Call Compare Return return:yes Return return:yes Call" + }, + { + "library": "pygame", + "name": "set_underline", + "source_code": "def set_underline(self, value):\n self.underline = bool(value)", + "docstring": "set_underline(bool) -> None control if text is rendered with an underline", + "type": "method", + "file_path": "pygame\\src_py\\ftfont.py", + "ast_data": "FunctionDef name:set_underline arg:self arg:value arguments arg arg Assign Call" + }, + { + "library": "scipy", + "name": "_rdot", + "source_code": "def _rdot(self, x):\n if isinstance(x, LinearOperator):\n return _ProductLinearOperator(x, self)\n elif np.isscalar(x):\n return _ScaledLinearOperator(self, x)\n else:\n if not issparse(x) and (not is_pydata_spmatrix(x)):\n x = np.asarray(x)\n if x.ndim == 1 or (x.ndim == 2 and x.shape[0] == 1):\n return self.T.matvec(x.T).T\n elif x.ndim == 2:\n return self.T.matmat(x.T).T\n else:\n raise ValueError(f'expected 1-d or 2-d array or matrix, got {x!r}')", + "docstring": "Matrix-matrix or matrix-vector multiplication from the right. Parameters ---------- x : array_like 1-d or 2-d array, representing a vector or matrix. Returns ------- xA : array 1-d or 2-d array (depending on the shape of x) that represents the result of applying this linear operator on x from the right. 
Notes ----- This is copied from dot to implement right multiplication.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py", + "ast_data": "FunctionDef name:_rdot arg:self arg:x arguments arg arg If Call Return return:yes Call If Call Return return:yes Call If BoolOp Call Call Assign Call If BoolOp Compare BoolOp Compare Compare Return return:yes Call If Compare Return return:yes Call Raise Call" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n feature_names = []\n for key in _collect_leaf_level_keys(self):\n if isinstance(key, (fc_types.FeatureColumn, fc_old._FeatureColumn)):\n feature_names.append(key.name)\n else:\n feature_names.append(key)\n return '_X_'.join(sorted(feature_names))", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Assign For Call If Call Call Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_dtype_to_na_value", + "source_code": "def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool):\n if isinstance(dtype, ExtensionDtype):\n return dtype.na_value\n elif dtype.kind in 'mM':\n return dtype.type('NaT')\n elif dtype.kind in 'fc':\n return dtype.type('NaN')\n elif dtype.kind == 'b':\n return None\n elif dtype.kind in 'iu':\n if not has_none_blocks:\n return None\n return np.nan\n elif dtype.kind == 'O':\n return np.nan\n raise NotImplementedError", + "docstring": "Find the NA value to go with this dtype.", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\concat.py", + "ast_data": "FunctionDef name:_dtype_to_na_value arg:dtype arg:has_none_blocks arguments arg arg If Call Return return:yes If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:no If Compare If Return return:no Return return:yes If Compare Return return:yes Raise" + }, + { + "library": "tensorflow", + "name": "load_op_library", + "source_code": "@tf_export('load_op_library')\ndef load_op_library(library_filename):\n lib_handle = py_tf.TF_LoadLibrary(library_filename)\n try:\n wrappers = _pywrap_python_op_gen.GetPythonWrappers(py_tf.TF_GetOpList(lib_handle))\n finally:\n py_tf.TF_DeleteLibraryHandle(lib_handle)\n module_name = hashlib.sha1(wrappers).hexdigest()\n if module_name in sys.modules:\n return sys.modules[module_name]\n module_spec = importlib.machinery.ModuleSpec(module_name, None)\n module = importlib.util.module_from_spec(module_spec)\n exec(wrappers, module.__dict__)\n setattr(module, '_IS_TENSORFLOW_PLUGIN', True)\n sys.modules[module_name] = module\n return module", + "docstring": "Loads a TensorFlow plugin, containing custom ops and kernels. Pass \"library_filename\" to a platform-specific mechanism for dynamically loading a library. The rules for determining the exact location of the library are platform-specific and are not documented here. When the library is loaded, ops and kernels registered in the library via the macros are made available in the TensorFlow process. Note that ops with the same name as an existing op are rejected and not registered with the process. Args: library_filename: Path to the plugin. Relative or absolute filesystem path to a dynamic library file. Returns: A python module containing the Python wrappers for Ops defined in the plugin. 
Raises: RuntimeError: when unable to load the library or get the python wrappers.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\load_library.py", + "ast_data": "FunctionDef name:load_op_library arg:library_filename arguments arg Assign Call Try Assign Call Call Call Assign Call Call If Compare Return return:yes Assign Call Assign Call Call Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_numeric_verify_tensor_details", + "source_code": "def _get_numeric_verify_tensor_details(self) -> List[str]:\n if not self._numeric_verify_tensor_details:\n self._numeric_verify_tensor_details = []\n self._numeric_verify_op_details = {}\n for op_info in self._quant_interpreter._get_ops_details():\n if op_info['op_name'] == _NUMERIC_VERIFY_OP_NAME:\n self._numeric_verify_tensor_details.append(self._quant_interpreter._get_tensor_details(op_info['outputs'][0], subgraph_index=0))\n tensor_name = self._numeric_verify_tensor_details[-1]['name']\n self._numeric_verify_op_details[tensor_name] = op_info\n return self._numeric_verify_tensor_details", + "docstring": "Returns all names of all tensors from NumericVerify op.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py", + "ast_data": "FunctionDef name:_get_numeric_verify_tensor_details arg:self arguments arg If Assign Assign For Call If Compare Call Call Assign Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "pop", + "source_code": "def pop(self, key, default=missing):\n if not self.loaded:\n self.load()\n if default is missing:\n return self._data.pop(key)\n else:\n return self._data.pop(key, default)", + "docstring": "Remove the specified key and return the corresponding value. If key is not found, default is returned if given, otherwise KeyError is raised.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:pop arg:self arg:key arg:default arguments arg arg arg If Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "from_ordinals", + "source_code": "@classmethod\ndef from_ordinals(cls, ordinals, *, freq, name=None) -> Self:\n ordinals = np.asarray(ordinals, dtype=np.int64)\n dtype = PeriodDtype(freq)\n data = PeriodArray._simple_new(ordinals, dtype=dtype)\n return cls._simple_new(data, name=name)", + "docstring": "Construct a PeriodIndex from ordinals. Parameters ---------- ordinals : array-like of int The period offsets from the proleptic Gregorian epoch. freq : str or period object One of pandas period strings or corresponding objects. name : str, default None Name of the resulting PeriodIndex. Returns ------- PeriodIndex See Also -------- PeriodIndex.from_fields : Construct a PeriodIndex from fields (year, month, day, etc.). PeriodIndex.to_timestamp : Cast to DatetimeArray/Index. 
Examples -------- >>> idx = pd.PeriodIndex.from_ordinals([-1, 0, 1], freq=\"Q\") >>> idx PeriodIndex(['1969Q4', '1970Q1', '1970Q2'], dtype='period[Q-DEC]')", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\period.py", + "ast_data": "FunctionDef name:from_ordinals arg:cls arg:ordinals arguments arg arg arg arg Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "SparseCoefMixin", + "source_code": "class SparseCoefMixin:\n\n def densify(self):\n msg = 'Estimator, %(name)s, must be fitted before densifying.'\n check_is_fitted(self, msg=msg)\n if sp.issparse(self.coef_):\n self.coef_ = self.coef_.toarray()\n return self\n\n def sparsify(self):\n msg = 'Estimator, %(name)s, must be fitted before sparsifying.'\n check_is_fitted(self, msg=msg)\n self.coef_ = sp.csr_matrix(self.coef_)\n return self", + "docstring": "Mixin for converting coef_ to and from CSR format. L1-regularizing estimators should inherit this.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py", + "ast_data": "ClassDef name:SparseCoefMixin FunctionDef name:densify arg:self arguments arg Assign Call If Call Assign Call Return return:yes FunctionDef name:sparsify arg:self arguments arg Assign Call Assign Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "clear", + "source_code": "def clear(self):\n self.__dict__.clear()", + "docstring": "Remove all attributes of self.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\__init__.py", + "ast_data": "FunctionDef name:clear arg:self arguments arg Call" + }, + { + "library": "tensorflow", + "name": "declared_input_types", + "source_code": "@property\ndef declared_input_types(self):\n return self._input_types", + "docstring": "Returns the list of data types of explicit declared inputs.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py", + "ast_data": "FunctionDef name:declared_input_types arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_device_index", + "source_code": "def _get_device_index(device: Any, optional: bool=False, allow_cpu: bool=False) -> int:\n if isinstance(device, int):\n return device\n if isinstance(device, str):\n device = torch.device(device)\n if isinstance(device, torch.device):\n if allow_cpu:\n if device.type not in ['xpu', 'cpu']:\n raise ValueError(f'Expected a xpu or cpu device, but got: {device}')\n elif device.type != 'xpu':\n raise ValueError(f'Expected a xpu device, but got: {device}')\n if not torch.jit.is_scripting():\n if isinstance(device, torch.xpu.device):\n return device.idx\n return _torch_get_device_index(device, optional, allow_cpu)", + "docstring": "Get the device index from :attr:, which can be a torch.device object, a Python integer, or `deviceoptionalallow_cpudevicedeviceoptional`.", + "type": "function", + "file_path": "pytorch\\torch\\xpu\\_utils.py", + "ast_data": "FunctionDef name:_get_device_index arg:device arg:optional arg:allow_cpu arguments arg arg arg If Call Return return:yes If Call Assign Call If Call If If Compare Raise Call If Compare Raise Call If Call If Call Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "id", + "source_code": "def id(self):\n return self._mangler.parent_name()", + "docstring": "Returns internal identifier that torch.package uses to distinguish :class: instances. 
Looks like::", + "type": "method", + "file_path": "pytorch\\torch\\package\\package_importer.py", + "ast_data": "FunctionDef name:id arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_can_use_numexpr", + "source_code": "def _can_use_numexpr(op, op_str, left_op, right_op, dtype_check) -> bool:\n if op_str is not None:\n if left_op.size > _MIN_ELEMENTS:\n dtypes: set[str] = set()\n for o in [left_op, right_op]:\n if hasattr(o, 'dtype'):\n dtypes |= {o.dtype.name}\n if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:\n return True\n return False", + "docstring": "return left_op boolean if we WILL be using numexpr", + "type": "function", + "file_path": "pandas\\pandas\\core\\computation\\expressions.py", + "ast_data": "FunctionDef name:_can_use_numexpr arg:op arg:op_str arg:left_op arg:right_op arg:dtype_check arguments arg arg arg arg arg If Compare If Compare Call For If Call If BoolOp Call Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_global_generator", + "source_code": "@tf_export('random.set_global_generator', 'random.experimental.set_global_generator')\ndef set_global_generator(generator):\n global global_generator\n global_generator = generator", + "docstring": "Replaces the global generator with another object. This function replaces the global generator with the provided object. A random number generator utilizes a object to store its state. The user shall be aware of caveats how interacts with : - tf.function puts restrictions on Variable creation thus one cannot freely create a new random generator instance inside . To call inside , the generator instance must have already been created eagerly. - tf.function captures the Variable during trace-compilation, thus a compiled f.function will not be affected as demonstrated by random_test.py/RandomTest.testResetGlobalGeneratorBadWithDefun . For most use cases, avoid calling after program initialization, and prefer to reset the state of the existing global generator instead, such as, >>> rng = tf.random.get_global_generator() >>> rng.reset_from_seed(30) Args: generator: the new object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", + "ast_data": "FunctionDef name:set_global_generator arg:generator arguments arg Assign Call" + }, + { + "library": "kornia", + "name": "normalize_homography", + "source_code": "def normalize_homography(dst_pix_trans_src_pix: Tensor, dsize_src: tuple[int, int], dsize_dst: tuple[int, int]) -> Tensor:\n if not isinstance(dst_pix_trans_src_pix, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(dst_pix_trans_src_pix)}')\n if not (len(dst_pix_trans_src_pix.shape) == 3 or dst_pix_trans_src_pix.shape[-2:] == (3, 3)):\n raise ValueError(f'Input dst_pix_trans_src_pix must be a Bx3x3 tensor. Got {dst_pix_trans_src_pix.shape}')\n src_h, src_w = dsize_src\n dst_h, dst_w = dsize_dst\n src_norm_trans_src_pix: Tensor = normal_transform_pixel(src_h, src_w).to(dst_pix_trans_src_pix)\n src_pix_trans_src_norm = _torch_inverse_cast(src_norm_trans_src_pix)\n dst_norm_trans_dst_pix: Tensor = normal_transform_pixel(dst_h, dst_w).to(dst_pix_trans_src_pix)\n dst_norm_trans_src_norm: Tensor = dst_norm_trans_dst_pix @ (dst_pix_trans_src_pix @ src_pix_trans_src_norm)\n return dst_norm_trans_src_norm", + "docstring": "Normalize a given homography in pixels to [-1, 1]. Args: dst_pix_trans_src_pix: homography/ies from source to destination to be normalized. 
:math: dsize_src: size of the source image (height, width). dsize_dst: size of the destination image (height, width). Returns: the normalized homography of shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:normalize_homography arg:dst_pix_trans_src_pix arg:dsize_src arg:dsize_dst arguments arg arg arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Assign Assign Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "prepare_run_debug_urls", + "source_code": "@abc.abstractmethod\ndef prepare_run_debug_urls(self, fetches, feed_dict):\n pass", + "docstring": "Abstract method to be implemented by concrete subclasses. This method prepares the run-specific debug URL(s). Args: fetches: Same as the argument to feed_dict: Same as the argument to Returns: debug_urls: ( or of ) Debug URLs to be used in this call.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", + "ast_data": "FunctionDef name:prepare_run_debug_urls arg:self arg:fetches arg:feed_dict arguments arg arg arg" + }, + { + "library": "pytorch", + "name": "TensorAsKey", + "source_code": "class TensorAsKey:\n\n def __init__(self, obj):\n\n def get_tensor_key(obj):\n assert not (obj.dtype.is_floating_point or obj.dtype.is_complex), obj.dtype\n return (obj.data_ptr(), obj.storage_offset(), obj.shape, obj.stride(), obj.dtype)\n self._obj_ref = weakref.ref(obj)\n if obj.layout is torch.strided:\n self.key = get_tensor_key(obj)\n elif obj.layout in {torch.sparse_csr, torch.sparse_bsr}:\n self.key = (get_tensor_key(obj.crow_indices()), get_tensor_key(obj.col_indices()))\n elif obj.layout in {torch.sparse_csc, torch.sparse_bsc}:\n self.key = (get_tensor_key(obj.ccol_indices()), get_tensor_key(obj.row_indices()))\n else:\n raise NotImplementedError(obj.layout)\n self._hash = hash(self.key)\n\n def __hash__(self):\n return self._hash\n\n def __eq__(self, other):\n if not isinstance(other, TensorAsKey):\n return False\n if self.obj is None or other.obj is None:\n return self is other\n return self.key == other.key\n\n @property\n def obj(self):\n return self._obj_ref()", + "docstring": "A light-weight wrapper of a tensor that enables storing tensors as keys with efficient memory reference based comparision as an approximation to data equality based keys. Motivation: the hash value of a torch tensor is tensor instance based that does not use data equality and makes the usage of tensors as keys less useful. 
For instance, the result of `2crow_indices` would return False.", + "type": "class", + "file_path": "pytorch\\torch\\sparse\\_triton_ops.py", + "ast_data": "ClassDef name:TensorAsKey FunctionDef name:__init__ arg:self arg:obj arguments arg arg FunctionDef name:get_tensor_key arg:obj arguments arg BoolOp Return return:yes Call Call Call Assign Call If Compare Assign Call If Compare Assign Call Call Call Call If Compare Assign Call Call Call Call Raise Call Assign Call FunctionDef name:__hash__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes If BoolOp Compare Compare Return return:yes Compare Return return:yes Compare FunctionDef name:obj arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "createResolutionCallbackFromClosure", + "source_code": "def createResolutionCallbackFromClosure(fn):\n closure = get_closure(fn)\n\n class closure_lookup:\n\n def __getattr__(self, key):\n if key in closure:\n return closure[key]\n elif hasattr(typing, key):\n return getattr(typing, key)\n elif hasattr(builtins, key):\n return getattr(builtins, key)\n return None\n return createResolutionCallbackFromEnv(closure_lookup())", + "docstring": "Create a resolutionCallback by introspecting the function instead of looking up the stack for the enclosing scope", + "type": "function", + "file_path": "pytorch\\torch\\_jit_internal.py", + "ast_data": "FunctionDef name:createResolutionCallbackFromClosure arg:fn arguments arg Assign Call ClassDef name:closure_lookup FunctionDef name:__getattr__ arg:self arg:key arguments arg arg If Compare Return return:yes If Call Return return:yes Call If Call Return return:yes Call Return return:no Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "depth_to_normals", + "source_code": "def depth_to_normals(depth: Tensor, camera_matrix: Tensor, normalize_points: bool=False) -> Tensor:\n KORNIA_CHECK_IS_TENSOR(depth)\n KORNIA_CHECK_IS_TENSOR(camera_matrix)\n KORNIA_CHECK_SHAPE(depth, ['B', '1', 'H', 'W'])\n KORNIA_CHECK_SHAPE(camera_matrix, ['B', '3', '3'])\n xyz: Tensor = depth_to_3d(depth, camera_matrix, normalize_points)\n gradients: Tensor = spatial_gradient(xyz)\n a, b = (gradients[:, :, 0], gradients[:, :, 1])\n normals: Tensor = torch.cross(a, b, dim=1)\n return kornia_ops.normalize(normals, dim=1, p=2)", + "docstring": "Compute the normal surface per pixel. Args: depth: image tensor containing a depth value per pixel with shape :math:. camera_matrix: tensor containing the camera intrinsics with shape :math:. normalize_points: whether to normalize the pointcloud. This must be set to when the depth is represented as the Euclidean ray length from the camera position. Return: tensor with a normal surface vector per pixel of the same resolution as the input :math:. 
Example: >>> depth = torch.rand(1, 1, 4, 4) >>> K = torch.eye(3)[None] >>> depth_to_normals(depth, K).shape torch.Size([1, 3, 4, 4])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\depth.py", + "ast_data": "FunctionDef name:depth_to_normals arg:depth arg:camera_matrix arg:normalize_points arguments arg arg arg Call Call Call Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "as_table", + "source_code": "def as_table(self):\n return self.render(self.template_name_table)", + "docstring": "Render as elements excluding the surrounding tag.", + "type": "method", + "file_path": "django\\django\\forms\\utils.py", + "ast_data": "FunctionDef name:as_table arg:self arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "compute_grad", + "source_code": "def compute_grad(J, f):\n if isinstance(J, LinearOperator):\n return J.rmatvec(f)\n else:\n return J.T.dot(f)", + "docstring": "Compute gradient of the least-squares cost function.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py", + "ast_data": "FunctionDef name:compute_grad arg:J arg:f arguments arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "ExpressionWrapper", + "source_code": "@deconstructible(path='django.db.models.ExpressionWrapper')\nclass ExpressionWrapper(SQLiteNumericMixin, Expression):\n\n def __init__(self, expression, output_field):\n super().__init__(output_field=output_field)\n self.expression = expression\n\n def set_source_expressions(self, exprs):\n self.expression = exprs[0]\n\n def get_source_expressions(self):\n return [self.expression]\n\n def get_group_by_cols(self):\n if isinstance(self.expression, Expression):\n expression = self.expression.copy()\n expression.output_field = self.output_field\n return expression.get_group_by_cols()\n return super().get_group_by_cols()\n\n def as_sql(self, compiler, connection):\n return compiler.compile(self.expression)\n\n def __repr__(self):\n return '{}({})'.format(self.__class__.__name__, self.expression)\n\n @property\n def allowed_default(self):\n return self.expression.allowed_default", + "docstring": "An expression that can wrap another expression so that it can provide extra context to the inner expression, such as the output_field.", + "type": "class", + "file_path": "django\\django\\db\\models\\expressions.py", + "ast_data": "ClassDef name:ExpressionWrapper FunctionDef name:__init__ arg:self arg:expression arg:output_field arguments arg arg arg Call Call Assign FunctionDef name:set_source_expressions arg:self arg:exprs arguments arg arg Assign FunctionDef name:get_source_expressions arg:self arguments arg Return return:yes FunctionDef name:get_group_by_cols arg:self arguments arg If Call Assign Call Assign Return return:yes Call Return return:yes Call Call FunctionDef name:as_sql arg:self arg:compiler arg:connection arguments arg arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call FunctionDef name:allowed_default arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_matching_files_v2", + "source_code": "@tf_export('io.gfile.glob')\ndef get_matching_files_v2(pattern):\n if isinstance(pattern, six.string_types):\n return [compat.as_str_any(matching_filename) for matching_filename in _pywrap_file_io.GetMatchingFiles(compat.as_bytes(pattern))]\n else:\n return [compat.as_str_any(matching_filename) for single_filename in 
pattern for matching_filename in _pywrap_file_io.GetMatchingFiles(compat.as_bytes(single_filename))]", + "docstring": "Returns a list of files that match the given pattern(s). The patterns are defined as strings. Supported patterns are defined here. Note that the pattern can be a Python iteratable of string patterns. The format definition of the pattern is: **pattern**: **term**: * : matches any sequence of non-'/' characters * : matches a single non-'/' character * : matches any single character (not) on the list * : matches character where * : matches character **character range**: * : matches character while * : matches character * : matches character for GetMatchingPathscore/platform/file_system.h`] (../../../core/platform/file_system.h) for implementation details. Args: pattern: string or iterable of strings. The glob pattern(s). Returns: A list of strings containing filenames that match the given pattern(s). Raises: errors.OpError: If there are filesystem / directory listing errors. errors.NotFoundError: If pattern to be matched is an invalid directory.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", + "ast_data": "FunctionDef name:get_matching_files_v2 arg:pattern arguments arg If Call Return return:yes Call Call Call Return return:yes Call Call Call Call" + }, + { + "library": "authlib", + "name": "UnauthorizedClientError", + "source_code": "class UnauthorizedClientError(OAuth2Error):\n error = 'unauthorized_client'", + "docstring": "The authenticated client is not authorized to use this authorization grant type.", + "type": "class", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\errors.py", + "ast_data": "ClassDef name:UnauthorizedClientError Assign" + }, + { + "library": "tensorflow", + "name": "calibrate", + "source_code": "@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.CALIBRATE)\ndef calibrate(self, dataset_gen):\n self._feed_tensors(dataset_gen, resize_input=True)\n return self._calibrator.Calibrate()", + "docstring": "Calibrates the model with specified generator. Returns: A model with min and max calibration stats. Args: dataset_gen: A generator that generates calibration samples.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\optimize\\calibrator.py", + "ast_data": "FunctionDef name:calibrate arg:self arg:dataset_gen arguments arg arg Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_transformed_name", + "source_code": "def get_transformed_name(self, node):\n if isinstance(node, gast.Lambda):\n return 'lam'\n elif isinstance(node, gast.FunctionDef):\n return node.name\n raise ValueError('Unknown node type {}'.format(node))", + "docstring": "Returns a name for the output function. 
Subclasses may override this.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py", + "ast_data": "FunctionDef name:get_transformed_name arg:self arg:node arguments arg arg If Call Return return:yes If Call Return return:yes Raise Call Call" + }, + { + "library": "tensorflow", + "name": "DistributionMultiWorkerTrainingLoop", + "source_code": "class DistributionMultiWorkerTrainingLoop(training_utils_v1.TrainingLoop):\n\n def __init__(self, single_worker_loop):\n self._single_worker_loop = single_worker_loop\n\n def fit(self, *args, **kwargs):\n return _train_with_multi_worker(self._single_worker_loop.fit)(*args, **kwargs)\n\n def evaluate(self, *args, **kwargs):\n return _train_with_multi_worker(self._single_worker_loop.evaluate)(*args, **kwargs)\n\n def predict(self, *args, **kwargs):\n return self._single_worker_loop.predict(*args, **kwargs)", + "docstring": "Training loop for distribution strategy with multiple worker.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_distributed_v1.py", + "ast_data": "ClassDef name:DistributionMultiWorkerTrainingLoop FunctionDef name:__init__ arg:self arg:single_worker_loop arguments arg arg Assign FunctionDef name:fit arg:self arguments arg arg arg Return return:yes Call Call FunctionDef name:evaluate arg:self arguments arg arg arg Return return:yes Call Call FunctionDef name:predict arg:self arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_maybe_assert_valid_sample", + "source_code": "def _maybe_assert_valid_sample(self, counts):\n if not self.validate_args:\n return counts\n counts = distribution_util.embed_check_nonnegative_integer_form(counts)\n return control_flow_ops.with_dependencies([check_ops.assert_equal(self.total_count, math_ops.reduce_sum(counts, -1), message='counts must sum to `self.total_count`')], counts)", + "docstring": "Check counts for proper shape, values, then return tensor version.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\multinomial.py", + "ast_data": "FunctionDef name:_maybe_assert_valid_sample arg:self arg:counts arguments arg arg If Return return:yes Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "solvevec", + "source_code": "def solvevec(self, rhs, adjoint=False, name='solve'):\n with self._name_scope(name):\n rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(rhs, name='rhs')\n self._check_input_dtype(rhs)\n self_dim = -1 if adjoint else -2\n tensor_shape.dimension_at_index(self.shape, self_dim).assert_is_compatible_with(rhs.shape[-1])\n return self._solvevec(rhs, adjoint=adjoint)", + "docstring": "Solve single equation with best effort: . The returned will be close to an exact solution if is well conditioned. Otherwise closeness will vary. See class docstring for details. Examples: Args: rhs: with same as this operator. is treated like a [batch] vector meaning for every set of leading dimensions, the last dimension defines a vector. See class docstring for definition of compatibility regarding batch dimensions. adjoint: Python . If , solve the system involving the adjoint of this : . name: A name scope to use for ops added by this method. Returns: with shape and same as . 
Raises: NotImplementedError: If or is False.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:solvevec arg:self arg:rhs arg:adjoint arg:name arguments arg arg arg arg With Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_unary_op", + "source_code": "def _unary_op(fn):\n\n def unary_op_wrapper(x, name=None):\n return fn(x, name=name)\n return unary_op_wrapper", + "docstring": "Wrapper that restricts to have the correct signature.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py", + "ast_data": "FunctionDef name:_unary_op arg:fn arguments arg FunctionDef name:unary_op_wrapper arg:x arg:name arguments arg arg Return return:yes Call Return return:yes" + }, + { + "library": "kornia", + "name": "_draw_pixel", + "source_code": "def _draw_pixel(image: torch.Tensor, x: int, y: int, color: torch.Tensor) -> None:\n image[:, y, x] = color", + "docstring": "Draws a pixel into an image. Args: image: the input image to where to draw the lines with shape :math. x: the x coordinate of the pixel. y: the y coordinate of the pixel. color: the color of the pixel with :math where :math is the number of channels of the image. Return: Nothing is returned.", + "type": "function", + "file_path": "kornia\\kornia\\utils\\draw.py", + "ast_data": "FunctionDef name:_draw_pixel arg:image arg:x arg:y arg:color arguments arg arg arg arg Assign" + }, + { + "library": "django", + "name": "OGREnvelope", + "source_code": "class OGREnvelope(Structure):\n _fields_ = [('MinX', c_double), ('MaxX', c_double), ('MinY', c_double), ('MaxY', c_double)]", + "docstring": "Represent the OGREnvelope C Structure.", + "type": "class", + "file_path": "django\\django\\contrib\\gis\\gdal\\envelope.py", + "ast_data": "ClassDef name:OGREnvelope Assign" + }, + { + "library": "pytorch", + "name": "_arg_has_complex_dtype", + "source_code": "def _arg_has_complex_dtype(arg) -> bool:\n if isinstance(arg, torch.fx.Node) and 'val' in arg.meta and isinstance(arg.meta['val'], torch.Tensor) and torch.is_complex(arg.meta['val']):\n return True\n elif isinstance(arg, list):\n return any((_arg_has_complex_dtype(item) for item in arg))\n return False", + "docstring": "Check if the node has complex dtype recursively.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_dispatching.py", + "ast_data": "FunctionDef name:_arg_has_complex_dtype arg:arg arguments arg If BoolOp Call Compare Call Call Return return:yes If Call Return return:yes Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "components", + "source_code": "@property\ndef components(self) -> DataFrame:\n return self._get_values().components.set_index(self._parent.index).__finalize__(self._parent)", + "docstring": "Return a Dataframe of the components of the Timedeltas. Each row of the DataFrame corresponds to a Timedelta in the original Series and contains the individual components (days, hours, minutes, seconds, milliseconds, microseconds, nanoseconds) of the Timedelta. Returns ------- DataFrame See Also -------- TimedeltaIndex.components : Return a DataFrame of the individual resolution components of the Timedeltas. Series.dt.total_seconds : Return the total number of seconds in the duration. 
Examples -------- >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit=\"s\")) >>> s 0 0 days 00:00:00 1 0 days 00:00:01 2 0 days 00:00:02 3 0 days 00:00:03 4 0 days 00:00:04 dtype: timedelta64[ns] >>> s.dt.components days hours minutes seconds milliseconds microseconds nanoseconds 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 2 0 0 0 2 0 0 0 3 0 0 0 3 0 0 0 4 0 0 0 4 0 0 0", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\accessors.py", + "ast_data": "FunctionDef name:components arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "start", + "source_code": "def start(self) -> None:\n log.warning('No health check server started')", + "docstring": "Unsupported functionality for Pytorch, doesn't start any health check server", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\health_check_server.py", + "ast_data": "FunctionDef name:start arg:self arguments arg Call" + }, + { + "library": "django", + "name": "ManyToManyRawIdWidget", + "source_code": "class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):\n template_name = 'admin/widgets/many_to_many_raw_id.html'\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n if self.admin_site.is_registered(self.rel.model):\n context['widget']['attrs']['class'] = 'vManyToManyRawIdAdminField'\n return context\n\n def url_parameters(self):\n return self.base_url_parameters()\n\n def label_and_url_for_value(self, value):\n return ('', '')\n\n def value_from_datadict(self, data, files, name):\n value = data.get(name)\n if value:\n return value.split(',')\n\n def format_value(self, value):\n return ','.join((str(v) for v in value)) if value else ''", + "docstring": "A Widget for displaying ManyToMany ids in the \"raw_id\" interface rather than in a box.", + "type": "class", + "file_path": "django\\django\\contrib\\admin\\widgets.py", + "ast_data": "ClassDef name:ManyToManyRawIdWidget Assign FunctionDef name:get_context arg:self arg:name arg:value arg:attrs arguments arg arg arg arg Assign Call Call If Call Assign Return return:yes FunctionDef name:url_parameters arg:self arguments arg Return return:yes Call FunctionDef name:label_and_url_for_value arg:self arg:value arguments arg arg Return return:yes FunctionDef name:value_from_datadict arg:self arg:data arg:files arg:name arguments arg arg arg arg Assign Call If Return return:yes Call FunctionDef name:format_value arg:self arg:value arguments arg arg Return return:yes Call Call" + }, + { + "library": "seaborn", + "name": "tight_layout", + "source_code": "def tight_layout(self, *args, **kwargs):\n kwargs = kwargs.copy()\n kwargs.setdefault('rect', self._tight_layout_rect)\n if self._tight_layout_pad is not None:\n kwargs.setdefault('pad', self._tight_layout_pad)\n self._figure.tight_layout(*args, **kwargs)\n return self", + "docstring": "Call fig.tight_layout within rect that exclude the legend.", + "type": "method", + "file_path": "seaborn\\seaborn\\axisgrid.py", + "ast_data": "FunctionDef name:tight_layout arg:self arguments arg arg arg Assign Call Call If Compare Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_ensure_key_mapped_multiindex", + "source_code": "def _ensure_key_mapped_multiindex(index: MultiIndex, key: Callable, level=None) -> MultiIndex:\n if level is not None:\n if isinstance(level, (str, int)):\n level_iter = [level]\n else:\n level_iter = level\n sort_levels: range | set = {index._get_level_number(lev) for lev in level_iter}\n 
else:\n sort_levels = range(index.nlevels)\n mapped = [ensure_key_mapped(index._get_level_values(level), key) if level in sort_levels else index._get_level_values(level) for level in range(index.nlevels)]\n return type(index).from_arrays(mapped)", + "docstring": "Returns a new MultiIndex in which key has been applied to all levels specified in level (or all levels if level is None). Used for key sorting for MultiIndex. Parameters ---------- index : MultiIndex Index to which to apply the key function on the specified levels. key : Callable Function that takes an Index and returns an Index of the same shape. This key is applied to each level separately. The name of the level can be used to distinguish different levels for application. level : list-like, int or str, default None Level or list of levels to apply the key function to. If None, key function is applied to all levels. Other levels are left unchanged. Returns ------- labels : MultiIndex Resulting MultiIndex with modified levels.", + "type": "function", + "file_path": "pandas\\pandas\\core\\sorting.py", + "ast_data": "FunctionDef name:_ensure_key_mapped_multiindex arg:index arg:key arg:level arguments arg arg arg If Compare If Call Assign Assign Call Assign Call Assign Compare Call Call Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_load_distributed_snapshot", + "source_code": "def _load_distributed_snapshot(path: str, metadata: snapshot_pb2.DistributedSnapshotMetadata, reader_func: Callable[[dataset_ops.Dataset], dataset_ops.Dataset]) -> dataset_ops.Dataset:\n dataset = _ListSnapshotChunksDataset(path)\n dataset = dataset.map(lambda chunk_file: _SnapshotChunkDataset(chunk_file, element_spec=_parse_element_spec(metadata.element_spec), compression=metadata.compression))\n return reader_func(dataset)", + "docstring": "Loads a distributed snapshot.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\load_op.py", + "ast_data": "FunctionDef name:_load_distributed_snapshot arg:path arg:metadata arg:reader_func arguments arg arg arg Assign Call Assign Call arguments arg Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_rank", + "source_code": "def _rank(self) -> NoReturn:\n raise NotImplementedError()", + "docstring": "Integer rank of this Tensor. Unlike regular Tensors, the rank is always known for EagerTensors. This is more performant than len(self._shape_tuple()) Returns: Integer rank", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_rank arg:self arguments arg Raise Call" + }, + { + "library": "numpy", + "name": "_needs_add_docstring", + "source_code": "def _needs_add_docstring(obj):\n Py_TPFLAGS_HEAPTYPE = 1 << 9\n if isinstance(obj, (types.FunctionType, types.MethodType, property)):\n return False\n if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:\n return False\n return True", + "docstring": "Returns true if the only way to set the docstring of from python is via add_docstring. 
This function errs on the side of being overly conservative.", + "type": "function", + "file_path": "numpy\\numpy\\_core\\function_base.py", + "ast_data": "FunctionDef name:_needs_add_docstring arg:obj arguments arg Assign If Call Return return:yes If BoolOp Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "_RewriteInfo", + "source_code": "@dataclass\nclass _RewriteInfo:\n example_inputs: tuple[Any, ...]\n pattern: Callable\n replacement: Callable\n pattern_post_trans: Optional[Callable[[GraphModule], GraphModule]] = None\n replacement_post_trans: Optional[Callable[[GraphModule], GraphModule]] = None", + "docstring": "Data needed for rewrite, this includes example inputs, pattern and replacement functions and post transformation functions for the exported pattern and replacement GraphModule", + "type": "class", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\representation\\rewrite.py", + "ast_data": "ClassDef name:_RewriteInfo" + }, + { + "library": "pandas", + "name": "ParserError", + "source_code": "class ParserError(ValueError):\n pass", + "docstring": "Exception that is raised by an error encountered in parsing file contents. This is a generic error raised for errors encountered when functions like or are parsing contents of a file. See Also -------- read_csv : Read CSV (comma-separated) file into a DataFrame. read_html : Read HTML table into a DataFrame. Examples -------- >>> data = '''a,b,c ... cat,foo,bar ... dog,foo,\"baz''' >>> from io import StringIO >>> pd.read_csv(StringIO(data), skipfooter=1, engine=\"python\") Traceback (most recent call last): ParserError: ',' expected after '\"'. Error could possibly be due to parsing errors in the skipped footer rows", + "type": "class", + "file_path": "pandas\\pandas\\errors\\__init__.py", + "ast_data": "ClassDef name:ParserError" + }, + { + "library": "tensorflow", + "name": "initialize_logical_devices", + "source_code": "def initialize_logical_devices():\n context()._initialize_logical_devices()", + "docstring": "Initialize the virtual devices.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:initialize_logical_devices arguments Call Call" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, res, rhs1, rhs2):\n self.res = res\n self.rhs1 = rhs1\n self.rhs2 = rhs2", + "docstring": ":param res: tensor variable that stores the result of the outout :param rhs1: tensor or tensor variable :param rhs2: tensor or tensor variabke", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:res arg:rhs1 arg:rhs2 arguments arg arg arg arg Assign Assign Assign" + }, + { + "library": "django", + "name": "do_static", + "source_code": "@register.tag('static')\ndef do_static(parser, token):\n return StaticNode.handle_token(parser, token)", + "docstring": "Join the given path with the STATIC_URL setting. 
Usage:: {% static path [as varname] %} Examples:: {% static \"myapp/css/base.css\" %} {% static variable_with_path %} {% static \"myapp/css/base.css\" as admin_base_css %} {% static variable_with_path as varname %}", + "type": "function", + "file_path": "django\\django\\templatetags\\static.py", + "ast_data": "FunctionDef name:do_static arg:parser arg:token arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_validate_arguments", + "source_code": "def _validate_arguments(num_mel_bins, sample_rate, lower_edge_hertz, upper_edge_hertz, dtype):\n if num_mel_bins <= 0:\n raise ValueError('num_mel_bins must be positive. Got: %s' % num_mel_bins)\n if lower_edge_hertz < 0.0:\n raise ValueError('lower_edge_hertz must be non-negative. Got: %s' % lower_edge_hertz)\n if lower_edge_hertz >= upper_edge_hertz:\n raise ValueError('lower_edge_hertz %.1f >= upper_edge_hertz %.1f' % (lower_edge_hertz, upper_edge_hertz))\n if not isinstance(sample_rate, tensor.Tensor):\n if sample_rate <= 0.0:\n raise ValueError('sample_rate must be positive. Got: %s' % sample_rate)\n if upper_edge_hertz > sample_rate / 2:\n raise ValueError('upper_edge_hertz must not be larger than the Nyquist frequency (sample_rate / 2). Got %s for sample_rate: %s' % (upper_edge_hertz, sample_rate))\n if not dtype.is_floating:\n raise ValueError('dtype must be a floating point type. Got: %s' % dtype)", + "docstring": "Checks the inputs to linear_to_mel_weight_matrix.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\mel_ops.py", + "ast_data": "FunctionDef name:_validate_arguments arg:num_mel_bins arg:sample_rate arg:lower_edge_hertz arg:upper_edge_hertz arg:dtype arguments arg arg arg arg arg If Compare Raise Call If Compare Raise Call If Compare Raise Call If Call If Compare Raise Call If Compare Raise Call If Raise Call" + }, + { + "library": "scipy", + "name": "eigenvalues", + "source_code": "def eigenvalues(self, m=None):\n if m is None:\n m = self.n\n arange_plus1 = np.arange(1, m + 1, dtype=np.uint64)\n return arange_plus1 * arange_plus1", + "docstring": "Return the requested number of eigenvalues. Parameters ---------- m : int, optional The positive number of smallest eigenvalues to return. If not provided, then all eigenvalues will be returned. Returns ------- eigenvalues : array The requested smallest or all eigenvalues, in ascending order.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py", + "ast_data": "FunctionDef name:eigenvalues arg:self arg:m arguments arg arg If Compare Assign Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "argsort", + "source_code": "def argsort(self, *, ascending: bool=True, kind: SortKind='quicksort', na_position: str='last', **kwargs) -> np.ndarray:\n ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs)\n values = self._values_for_argsort()\n return nargsort(values, kind=kind, ascending=ascending, na_position=na_position, mask=np.asarray(self.isna()))", + "docstring": "Return the indices that would sort this array. Parameters ---------- ascending : bool, default True Whether the indices should result in an ascending or descending sort. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm. na_position : {'first', 'last'}, default 'last' If `numpy.argsort`. If NaN values are contained, NaN values are placed at the end. See Also -------- numpy.argsort : Sorting implementation used internally. 
Examples -------- >>> arr = pd.array([3, 1, 2, 5, 4]) >>> arr.argsort() array([1, 2, 0, 4, 3])", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:argsort arg:self arguments arg arg arg arg arg Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "_draw_disabled", + "source_code": "def _draw_disabled(self):\n no_ops = {meth_name: functools.update_wrapper(lambda *args, **kwargs: None, getattr(RendererBase, meth_name)) for meth_name in dir(RendererBase) if meth_name.startswith('draw_') or meth_name in ['open_group', 'close_group']}\n return _setattr_cm(self, **no_ops)", + "docstring": "Context manager to temporary disable drawing. This is used for getting the drawn size of Artists. This lets us run the draw process to update any Python state but does not pay the cost of the draw_XYZ calls on the canvas.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:_draw_disabled arg:self arguments arg Assign Call arguments arg arg Call Call BoolOp Call Compare Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "close", + "source_code": "def close(self):\n self.endStream()\n if self.passed_in_file_object:\n self.fh.flush()\n else:\n if self.original_file_like is not None:\n self.original_file_like.write(self.fh.getvalue())\n self.fh.close()", + "docstring": "Flush all buffers and free all resources.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", + "ast_data": "FunctionDef name:close arg:self arguments arg Call If Call If Compare Call Call Call" + }, + { + "library": "tensorflow", + "name": "variables", + "source_code": "def variables(self):\n return self._opt.variables()", + "docstring": "Fetches a list of optimizer variables in the default graph. This wraps from the actual optimizer. It does not include the 's local step. Returns: A list of variables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\sync_replicas_optimizer.py", + "ast_data": "FunctionDef name:variables arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_DDPBucketAssignment", + "source_code": "class _DDPBucketAssignment:\n\n def __init__(self, bucket_index: int, parameters: list[torch.Tensor], offset: int):\n self.bucket_index = bucket_index\n self.parameters = parameters\n self.offset = offset\n if len(self.parameters) == 0:\n raise ValueError('Empty bucket assignment')\n self.device: torch.device = self.parameters[0].device\n self.tensor: Optional[torch.Tensor] = None", + "docstring": "Represent a :class: bucket assignment. This means that a (possibly non-strict) subset of the parameters corresponding to a DDP bucket assigned to a rank to update. Attributes: bucket_index (int): index of the bucket determined by the DDP gradient bucket all-reduce order. parameters (List[torch.Tensor]): model parameters in the bucket assigned to this rank. offset (int): offset into the :class: 's :meth: giving the index of the first element in the passed-in `GradBucketgradients`. device (torch.device): device on which the parameters are stored. 
tensor (torch.Tensor): flattened tensor giving the data of the parameter subset assigned to the rank.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py", + "ast_data": "ClassDef name:_DDPBucketAssignment FunctionDef name:__init__ arg:self arg:bucket_index arg:parameters arg:offset arguments arg arg arg arg Assign Assign Assign If Compare Call Raise Call" + }, + { + "library": "pytorch", + "name": "kernels", + "source_code": "@property\ndef kernels(self):\n for i, kernel in enumerate(self._kernels):\n if isinstance(kernel, CodeCacheFuture):\n self._kernels[i] = kernel.result()\n return self._kernels", + "docstring": "Read results from future. This should be called after parallel compilation is done. In case you call this before compilation is done, it may slow down the parallel compilation.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\multi_kernel.py", + "ast_data": "FunctionDef name:kernels arg:self arguments arg For Call If Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "deserialize", + "source_code": "def deserialize(proto: SerializedTraceType) -> Serializable:\n for proto_class in PROTO_CLASS_TO_PY_CLASS:\n if proto.representation.Is(proto_class.DESCRIPTOR):\n actual_proto = proto_class()\n proto.representation.Unpack(actual_proto)\n return PROTO_CLASS_TO_PY_CLASS[proto_class].experimental_from_proto(actual_proto)\n raise ValueError('Can not deserialize proto of url: ', proto.representation.type_url, ' since no matching Python class could be found. For value ', proto.representation.value)", + "docstring": "Converts a proto SerializedTraceType to instance of Serializable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\serialization.py", + "ast_data": "FunctionDef name:deserialize arg:proto arguments arg For If Call Assign Call Call Return return:yes Call Raise Call" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X):\n check_is_fitted(self)\n return self.radius_neighbors_graph(X, mode=self.mode, sort_results=True)", + "docstring": "Compute the (weighted) graph of Neighbors for points in X. Parameters ---------- X : array-like of shape (n_samples_transform, n_features) Sample data. Returns ------- Xt : sparse matrix of shape (n_samples_transform, n_samples_fit) Xt[i, j] is assigned the weight of edge that connects i to j. Only the neighbors have an explicit value. The diagonal is always explicit. 
The matrix is of CSR format.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neighbors\\_graph.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "MemoizeJac", + "source_code": "class MemoizeJac:\n\n def __init__(self, fun):\n self.fun = fun\n self.jac = None\n self._value = None\n self.x = None\n\n def _compute_if_needed(self, x, *args):\n if not np.all(x == self.x) or self._value is None or self.jac is None:\n self.x = np.asarray(x).copy()\n fg = self.fun(x, *args)\n self.jac = fg[1]\n self._value = fg[0]\n\n def __call__(self, x, *args):\n self._compute_if_needed(x, *args)\n return self._value\n\n def derivative(self, x, *args):\n self._compute_if_needed(x, *args)\n return self.jac", + "docstring": "Decorator that caches the return values of a function returning `` each time it is called.", + "type": "class", + "file_path": "scipy\\scipy\\optimize\\_optimize.py", + "ast_data": "ClassDef name:MemoizeJac FunctionDef name:__init__ arg:self arg:fun arguments arg arg Assign Assign Assign Assign FunctionDef name:_compute_if_needed arg:self arg:x arguments arg arg arg If BoolOp Call Compare Compare Compare Assign Call Call Assign Call Assign Assign FunctionDef name:__call__ arg:self arg:x arguments arg arg arg Call Return return:yes FunctionDef name:derivative arg:self arg:x arguments arg arg arg Call Return return:yes" + }, + { + "library": "scipy", + "name": "get_max_rss_bytes", + "source_code": "def get_max_rss_bytes(rusage):\n if not rusage:\n return None\n if sys.platform.startswith('linux'):\n return rusage.ru_maxrss * 1024\n elif sys.platform == 'darwin':\n return rusage.ru_maxrss\n else:\n return rusage.ru_maxrss", + "docstring": "Extract the max RSS value in bytes.", + "type": "function", + "file_path": "scipy\\benchmarks\\benchmarks\\common.py", + "ast_data": "FunctionDef name:get_max_rss_bytes arg:rusage arguments arg If Return return:no If Call Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "cryptography", + "name": "size", + "source_code": "def size(self) -> int:\n return sum(map(len, self.flist))", + "docstring": "Current number of bytes", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py", + "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "watch_for_translation_changes", + "source_code": "def watch_for_translation_changes(sender, **kwargs):\n from django.conf import settings\n if settings.USE_I18N:\n directories = [Path('locale')]\n directories.extend((Path(config.path) / 'locale' for config in apps.get_app_configs() if not is_django_module(config.module)))\n directories.extend((Path(p) for p in settings.LOCALE_PATHS))\n for path in directories:\n sender.watch_dir(path, '**/*.mo')", + "docstring": "Register file watchers for .mo files in potential locale paths.", + "type": "function", + "file_path": "django\\django\\utils\\translation\\reloader.py", + "ast_data": "FunctionDef name:watch_for_translation_changes arg:sender arguments arg arg If Assign Call Call Call Call Call Call Call For Call" + }, + { + "library": "tensorflow", + "name": "set_partitioner", + "source_code": "def set_partitioner(self, partitioner):\n self._partitioner = partitioner", + "docstring": "Set partitioner for this scope.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py", 
+ "ast_data": "FunctionDef name:set_partitioner arg:self arg:partitioner arguments arg arg Assign" + }, + { + "library": "matplotlib", + "name": "_create_closed", + "source_code": "@classmethod\ndef _create_closed(cls, vertices):\n v = _to_unmasked_float_array(vertices)\n return cls(np.concatenate([v, v[:1]]), closed=True)", + "docstring": "Create a closed polygonal path going through *vertices*. Unlike `._create_closed`.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\path.py", + "ast_data": "FunctionDef name:_create_closed arg:cls arg:vertices arguments arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_get_window_indexer", + "source_code": "def _get_window_indexer(self) -> GroupbyIndexer:\n window_indexer = GroupbyIndexer(groupby_indices=self._grouper.indices, window_indexer=ExponentialMovingWindowIndexer)\n return window_indexer", + "docstring": "Return an indexer class that will compute the window start and end bounds Returns ------- GroupbyIndexer", + "type": "method", + "file_path": "pandas\\pandas\\core\\window\\ewm.py", + "ast_data": "FunctionDef name:_get_window_indexer arg:self arguments arg Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_GetGradReduced", + "source_code": "def _GetGradReduced(output_grad, output_subs, input_subs, input_shape, reduced_label_set):\n reduced_subs, reduced_dims, reduced_axes = _GetReducedSubscripts(reduced_label_set, input_shape, input_subs)\n has_repeated_labels = len(set(input_subs)) + len(set(output_subs)) < len(input_subs) + len(output_subs)\n input_subs_without_reduced_labels = ''.join([s for s in input_subs if s not in reduced_label_set])\n if not has_repeated_labels and input_subs_without_reduced_labels == output_subs:\n reduced_shape = math_ops.reduced_shape(input_shape, ops.convert_to_tensor(reduced_axes))\n return array_ops.broadcast_to(array_ops.reshape(output_grad, reduced_shape), input_shape)\n grad_shape_with_reduced_labels = array_ops.concat([reduced_dims, array_ops.shape(output_grad)], axis=0)\n reduced_shape = array_ops.concat([array_ops.ones(len(reduced_label_set), dtype=dtypes.int32), array_ops.shape(output_grad)], axis=0)\n broadcasted_grad = array_ops.broadcast_to(array_ops.reshape(output_grad, reduced_shape), grad_shape_with_reduced_labels)\n return gen_linalg_ops.einsum([broadcasted_grad], '{}->{}'.format(reduced_subs + output_subs, input_subs))", + "docstring": "Returns the gradient wrt input for a unary einsum with reductions. Args: output_grad: The gradient wrt the output of a unary einsum operation. output_subs: The output subscript. (E.g. for equation ). input_subs: The input subscript. (E.g. for equation ). input_shape: A representing the shape of the input operand. 
reduced_label_set: The set of axis labels appearing in but not in .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py", + "ast_data": "FunctionDef name:_GetGradReduced arg:output_grad arg:output_subs arg:input_subs arg:input_shape arg:reduced_label_set arguments arg arg arg arg arg Assign Call Assign Compare Call Call Call Call Call Call Assign Call Compare If BoolOp Compare Assign Call Call Return return:yes Call Call Assign Call Call Assign Call Call Call Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_get_input_shape_for_sparse_tensor", + "source_code": "def _get_input_shape_for_sparse_tensor(self, tensor, feature, per_replica, path) -> TensorShape:\n shape = tensor.shape.as_list()\n if len(shape) < 2:\n raise ValueError('Only rank 2 and above sparse tensor is supported, find rank {} sparse tensor for input {}'.format(len(shape), path))\n if not feature.output_shape and feature.max_sequence_length > 0:\n if len(shape) == 2:\n shape.insert(len(shape) - 1, feature.max_sequence_length)\n if self._num_cores_per_replica and per_replica and shape[0]:\n shape[0] = shape[0] // self._num_cores_per_replica\n return TensorShape(shape)", + "docstring": "Get the input shape for the sparse tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py", + "ast_data": "FunctionDef name:_get_input_shape_for_sparse_tensor arg:self arg:tensor arg:feature arg:per_replica arg:path arguments arg arg arg arg arg Assign Call If Compare Call Raise Call Call Call If BoolOp Compare If Compare Call Call Call If BoolOp Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "input_shape", + "source_code": "@property\ndef input_shape(self):\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called and thus has no defined input shape.')\n all_input_shapes = set([str(node.input_shapes) for node in self._inbound_nodes])\n if len(all_input_shapes) == 1:\n return self._inbound_nodes[0].input_shapes\n else:\n raise AttributeError('The layer \"' + str(self.name) + ' has multiple inbound nodes, with different input shapes. Hence the notion of \"input shape\" is ill-defined for the layer. Use `get_input_shape_at(node_index)` instead.')", + "docstring": "Retrieves the input shape(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer, or if all inputs have the same shape. Returns: Input shape, as an integer shape tuple (or list of shape tuples, one tuple per input tensor). Raises: AttributeError: if the layer has no defined input_shape. RuntimeError: if called in Eager mode.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:input_shape arg:self arguments arg If Raise Call Assign Call Call If Compare Call Return return:yes Raise Call Call" + }, + { + "library": "numpy", + "name": "masked_equal", + "source_code": "def masked_equal(x, value, copy=True):\n output = masked_where(equal(x, value), x, copy=copy)\n output.fill_value = value\n return output", + "docstring": "Mask an array where equal to a given value. Return a MaskedArray, masked where the data in array are equal to . The fill_value of the returned MaskedArray is set to . For floating point arrays, consider using ``. See Also -------- masked_where : Mask where a condition is met. masked_values : Mask using floating point equality. 
Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_equal(a, 2) masked_array(data=[0, 1, --, 3], mask=[False, False, True, False], fill_value=2)", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:masked_equal arg:x arg:value arg:copy arguments arg arg arg Assign Call Call Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "partial_fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X=None, y=None):\n if X is None:\n self._global_clustering()\n return self\n else:\n return self._fit(X, partial=True)", + "docstring": "Online learning. Prevents rebuilding of CFTree from scratch. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None Input data. If X is not provided, only the global clustering step is done. y : Ignored Not used, present here for API consistency by convention. Returns ------- self Fitted estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cluster\\_birch.py", + "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arguments arg arg arg If Compare Call Return return:yes Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "aps11_f", + "source_code": "def aps11_f(x, n):\n return (n * x - 1) / ((n - 1) * x)", + "docstring": "Rational function with a zero at x=1/n and a pole at x=0", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_tstutils.py", + "ast_data": "FunctionDef name:aps11_f arg:x arg:n arguments arg arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_linear_predictor", + "source_code": "def _linear_predictor(self, X):\n check_is_fitted(self)\n X = validate_data(self, X, accept_sparse=['csr', 'csc', 'coo'], dtype=[np.float64, np.float32], ensure_2d=True, allow_nd=False, reset=False)\n return X @ self.coef_ + self.intercept_", + "docstring": "Compute the linear_predictor = . Note that we often use the term raw_prediction instead of linear predictor. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. Returns ------- y_pred : array of shape (n_samples,) Returns predicted values of linear predictor.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\glm.py", + "ast_data": "FunctionDef name:_linear_predictor arg:self arg:X arguments arg arg Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "register_buffer_assignment_hook", + "source_code": "def register_buffer_assignment_hook(mod, assigned_buffers):\n\n def _map_assigned_buffer_to_proxy(_mod, name, buffer):\n if _mod._buffers is mod._buffers:\n if isinstance(buffer, FunctionalTensor):\n buffer = buffer.from_functional()\n assert isinstance(buffer, FakeTensor)\n proxy_mode = torch.fx.experimental.proxy_tensor.get_proxy_mode()\n assert proxy_mode is not None\n proxy = torch.fx.experimental.proxy_tensor.get_proxy_slot(buffer, proxy_mode.tracer).proxy.node\n assigned_buffers[name] = proxy.name\n return buffer\n return torch.nn.modules.module.register_module_buffer_registration_hook(_map_assigned_buffer_to_proxy)", + "docstring": "Register a hook that intercepts buffer assignments. 
This is used to detect when a buffer is assigned to, and then we can map that buffer to the corresponding proxy node in the graph.", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\utils.py", + "ast_data": "FunctionDef name:register_buffer_assignment_hook arg:mod arg:assigned_buffers arguments arg arg FunctionDef name:_map_assigned_buffer_to_proxy arg:_mod arg:name arg:buffer arguments arg arg arg If Compare If Call Assign Call Call Assign Call Compare Assign Call Assign Return return:yes Return return:yes Call" + }, + { + "library": "scipy", + "name": "sort_cache_result", + "source_code": "def sort_cache_result(self):\n results = {}\n self.xl_maps = np.array(self.xl_maps)\n self.f_maps = np.array(self.f_maps)\n ind_sorted = np.argsort(self.f_maps)\n results['xl'] = self.xl_maps[ind_sorted]\n self.f_maps = np.array(self.f_maps)\n results['funl'] = self.f_maps[ind_sorted]\n results['funl'] = results['funl'].T\n results['x'] = self.xl_maps[ind_sorted[0]]\n results['fun'] = self.f_maps[ind_sorted[0]]\n self.xl_maps = np.ndarray.tolist(self.xl_maps)\n self.f_maps = np.ndarray.tolist(self.f_maps)\n return results", + "docstring": "Sort results and build the global return object", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_shgo.py", + "ast_data": "FunctionDef name:sort_cache_result arg:self arguments arg Assign Assign Call Assign Call Assign Call Assign Assign Call Assign Assign Assign Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_data_home", + "source_code": "@validate_params({'data_home': [str, os.PathLike, None]}, prefer_skip_nested_validation=True)\ndef get_data_home(data_home=None) -> str:\n if data_home is None:\n data_home = environ.get('SCIKIT_LEARN_DATA', join('~', 'scikit_learn_data'))\n data_home = expanduser(data_home)\n makedirs(data_home, exist_ok=True)\n return data_home", + "docstring": "Return the path of the scikit-learn data directory. This folder is used by some large dataset loaders to avoid downloading the data several times. By default the data directory is set to a folder named 'scikit_learn_data' in the user home folder. Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment variable or programmatically by giving an explicit folder path. The '~' symbol is expanded to the user home folder. If the folder does not already exist, it is automatically created. Parameters ---------- data_home : str or path-like, default=None The path to scikit-learn data directory. If , the default path is . Returns ------- data_home: str The path to scikit-learn data directory. 
Examples -------- >>> import os >>> from sklearn.datasets import get_data_home >>> data_home_path = get_data_home() >>> os.path.exists(data_home_path) True", + "type": "function", + "file_path": "scikit-learn\\sklearn\\datasets\\_base.py", + "ast_data": "FunctionDef name:get_data_home arg:data_home arguments arg If Compare Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, sess, grpc_debug_server_addresses, watch_fn=None, thread_name_filter=None):\n framework.NonInteractiveDebugWrapperSession.__init__(self, sess, watch_fn=watch_fn, thread_name_filter=thread_name_filter)\n if isinstance(grpc_debug_server_addresses, str):\n self._grpc_debug_server_urls = [self._normalize_grpc_url(grpc_debug_server_addresses)]\n elif isinstance(grpc_debug_server_addresses, list):\n self._grpc_debug_server_urls = []\n for address in grpc_debug_server_addresses:\n if not isinstance(address, str):\n raise TypeError('Expected type str in list grpc_debug_server_addresses, received type %s' % type(address))\n self._grpc_debug_server_urls.append(self._normalize_grpc_url(address))\n else:\n raise TypeError('Expected type str or list in grpc_debug_server_addresses, received type %s' % type(grpc_debug_server_addresses))", + "docstring": "Constructor of DumpingDebugWrapperSession. Args: sess: The TensorFlow object being wrapped. grpc_debug_server_addresses: ( or of ) Single or a list of the gRPC debug server addresses, in the format of , with or without the \"grpc://\" prefix. For example: \"localhost:7000\", [\"localhost:7000\", \"192.168.0.2:8000\"] watch_fn: () A Callable that can be used to define per-run debug ops and watched tensors. See the doc of for details. thread_name_filter: Regular-expression white list for threads on which the wrapper session will be active. See doc of for more details. Raises: TypeError: If is not a or a of .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\grpc_wrapper.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:sess arg:grpc_debug_server_addresses arg:watch_fn arg:thread_name_filter arguments arg arg arg arg arg Call If Call Assign Call If Call Assign For If Call Raise Call Call Call Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "_gather_saveables_for_checkpoint", + "source_code": "def _gather_saveables_for_checkpoint(self):\n\n def _saveable_factory(name=self._common_name):\n return _DistributedVariableSaveable(self, self._primary, name)\n return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}", + "docstring": "Overrides Trackable method. This allows both name-based and object-based save and restore of DistributedVariables. Returns: A dictionary mapping attribute names to factories.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "FunctionDef name:_gather_saveables_for_checkpoint arg:self arguments arg FunctionDef name:_saveable_factory arg:name arguments arg Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "add_to_set_toplevel", + "source_code": "@staticmethod\ndef add_to_set_toplevel(key: str, value: Any, log_level: CompileEventLogLevel=CompileEventLogLevel.COMPILATION_METRIC):\n chromium_log = get_chromium_event_logger()\n top_event = chromium_log.get_outermost_event()\n if top_event is None:\n raise RuntimeError('No toplevel event active. 
Please only call this function within a metrics context/dynamo_timed.')\n CompileEventLogger.add_to_set(top_event, log_level, key, value)", + "docstring": "Same as add to set, just does it automatically to the toplevel event instead of having to explicitly name it. Defaults to COMPILATION_METRIC log level.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:add_to_set_toplevel arg:key arg:value arg:log_level arguments arg arg arg Assign Call Assign Call If Compare Raise Call Call" + }, + { + "library": "seaborn", + "name": "_repr_html_", + "source_code": "def _repr_html_(self):\n s = 55\n n = len(self)\n html = f''\n for i, c in enumerate(self.as_hex()):\n html += f''\n html += ''\n return html", + "docstring": "Rich display of the color palette in an HTML frontend.", + "type": "method", + "file_path": "seaborn\\seaborn\\palettes.py", + "ast_data": "FunctionDef name:_repr_html_ arg:self arguments arg Assign Assign Call Assign For Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "compute_max_candidates", + "source_code": "def compute_max_candidates(p_m0: Tensor, p_m1: Tensor) -> Tensor:\n h0s, w0s = (p_m0.sum(1).max(-1)[0], p_m0.sum(-1).max(-1)[0])\n h1s, w1s = (p_m1.sum(1).max(-1)[0], p_m1.sum(-1).max(-1)[0])\n max_cand = torch.sum(torch.min(torch.stack([h0s * w0s, h1s * w1s], -1), -1)[0])\n return max_cand", + "docstring": "Compute the max candidates of all pairs within a batch. Args: p_m0: padded mask 0 p_m1: padded mask 1", + "type": "function", + "file_path": "kornia\\kornia\\feature\\loftr\\utils\\coarse_matching.py", + "ast_data": "FunctionDef name:compute_max_candidates arg:p_m0 arg:p_m1 arguments arg arg Assign Call Call Call Call Assign Call Call Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "authlib", + "name": "compress", + "source_code": "def compress(self, s):\n data = zlib.compress(s)\n return data[2:-4]", + "docstring": "Compress bytes data with DEFLATE algorithm.", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7518\\jwe_zips.py", + "ast_data": "FunctionDef name:compress arg:self arg:s arguments arg arg Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_gamma1p", + "source_code": "def _gamma1p(vals):\n res = gamma(vals + 1)\n if isinstance(res, np.ndarray):\n if not _is_subdtype(vals.dtype, 'c'):\n res[vals == -1] = np.nan\n elif np.isinf(res) and vals == -1:\n res = np.float64('nan')\n return res", + "docstring": "returns gamma(n+1), though with NaN at -1 instead of inf, c.f. 
#21827", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:_gamma1p arg:vals arguments arg Assign Call If Call If Call Assign Compare If BoolOp Call Compare Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_topmost_subplotspec", + "source_code": "def get_topmost_subplotspec(self):\n return self._subplot_spec.get_topmost_subplotspec()", + "docstring": "Return the topmost instance associated with the subplot.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py", + "ast_data": "FunctionDef name:get_topmost_subplotspec arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "bbox_artist", + "source_code": "def bbox_artist(artist, renderer, props=None, fill=True):\n if props is None:\n props = {}\n props = props.copy()\n pad = props.pop('pad', 4)\n pad = renderer.points_to_pixels(pad)\n bbox = artist.get_window_extent(renderer)\n r = Rectangle(xy=(bbox.x0 - pad / 2, bbox.y0 - pad / 2), width=bbox.width + pad, height=bbox.height + pad, fill=fill, transform=transforms.IdentityTransform(), clip_on=False)\n r.update(props)\n r.draw(renderer)", + "docstring": "A debug function to draw a rectangle around the bounding box returned by an artist's to test whether the artist is returning the correct bbox. *props* is a dict of rectangle props with the additional property 'pad' that sets the padding around the bbox in points.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:bbox_artist arg:artist arg:renderer arg:props arg:fill arguments arg arg arg arg If Compare Assign Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call" + }, + { + "library": "pytorch", + "name": "fsdp_checkpointing_base", + "source_code": "def fsdp_checkpointing_base(model, blocks):\n non_reentrant_wrapper = functools.partial(checkpoint_wrapper, offload_to_cpu=False, checkpoint_impl=CheckpointImpl.NO_REENTRANT)\n\n def check_fn(submodule):\n return isinstance(submodule, blocks)\n apply_activation_checkpointing(model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn)", + "docstring": "apply activation checkpointing to model returns None as model is updated directly", + "type": "function", + "file_path": "pytorch\\benchmarks\\dynamo\\dist_util.py", + "ast_data": "FunctionDef name:fsdp_checkpointing_base arg:model arg:blocks arguments arg arg Assign Call FunctionDef name:check_fn arg:submodule arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "get_python_args", + "source_code": "def get_python_args(self, *args, **kwargs):\n\n def map_arg(arg):\n if isinstance(arg, ConstantVariable):\n return arg.as_python_constant()\n elif isinstance(arg, ListVariable) and (not arg.items):\n return []\n elif isinstance(arg, ConstDictVariable) and isinstance(arg.source, GetItemSource) and isinstance(arg.source.base, AttrSource) and (arg.source.base.member == 'param_groups'):\n return self.value.param_groups[arg.source.index]\n raise ArgMappingException\n new_args = [map_arg(arg) for arg in args]\n new_kwargs = {k: map_arg(v) for k, v in kwargs.items()}\n return (new_args, new_kwargs)", + "docstring": "Get python values equivalent to the variable tracker args", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\variables\\optimizer.py", + "ast_data": "FunctionDef name:get_python_args arg:self arguments arg arg arg FunctionDef name:map_arg arg:arg arguments arg If 
Call Return return:yes Call If BoolOp Call Return return:no If BoolOp Call Call Call Compare Return return:yes Raise Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "name_scope_only_in_function_or_graph", + "source_code": "def name_scope_only_in_function_or_graph(name):\n if not context.executing_eagerly():\n return ops.name_scope_v1(name)\n else:\n return NullContextmanager()", + "docstring": "Internal-only entry point for . Enters a compat.v1.name_scope only when in a function or graph, not when running fully eagerly. Args: name: The name argument that is passed to the op function. Returns: context manager.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", + "ast_data": "FunctionDef name:name_scope_only_in_function_or_graph arg:name arguments arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "update_tr_radius", + "source_code": "def update_tr_radius(Delta, actual_reduction, predicted_reduction, step_norm, bound_hit):\n if predicted_reduction > 0:\n ratio = actual_reduction / predicted_reduction\n elif predicted_reduction == actual_reduction == 0:\n ratio = 1\n else:\n ratio = 0\n if ratio < 0.25:\n Delta = 0.25 * step_norm\n elif ratio > 0.75 and bound_hit:\n Delta *= 2.0\n return (Delta, ratio)", + "docstring": "Update the radius of a trust region based on the cost reduction. Returns ------- Delta : float New radius. ratio : float Ratio between actual and predicted reductions.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py", + "ast_data": "FunctionDef name:update_tr_radius arg:Delta arg:actual_reduction arg:predicted_reduction arg:step_norm arg:bound_hit arguments arg arg arg arg arg If Compare Assign If Compare Assign Assign If Compare Assign If BoolOp Compare Return return:yes" + }, + { + "library": "pytorch", + "name": "update", + "source_code": "def update(op, device_name, version, key, value):\n if not value:\n warnings.warn(f'skipping empty value for {op}: device_name={device_name!r} version={version!r} key={key!r}')\n return\n if (op, device_name, version) in _operation_device_version_data:\n if _operation_device_version_data[op, device_name, version].get(key) == value:\n return\n _operation_device_version_data[op, device_name, version][key] = value\n else:\n _operation_device_version_data[op, device_name, version] = {key: value}", + "docstring": "Update the db of op parameters.", + "type": "function", + "file_path": "pytorch\\torch\\sparse\\_triton_ops_meta.py", + "ast_data": "FunctionDef name:update arg:op arg:device_name arg:version arg:key arg:value arguments arg arg arg arg arg If Call Return return:no If Compare If Compare Call Return return:no Assign Assign" + }, + { + "library": "tensorflow", + "name": "_add_dispatch_for_unary_elementwise_api", + "source_code": "def _add_dispatch_for_unary_elementwise_api(api, x_type, elementwise_api_handler):\n api_signature = tf_inspect.signature(api)\n x_name = list(api_signature.parameters)[0]\n name_index = _find_name_index(api_signature)\n need_to_bind_api_args = len(api_signature.parameters) > 2 or 'name' not in api_signature.parameters\n\n @dispatch_for_api(api, {x_name: x_type})\n def dispatch_target(*args, **kwargs):\n args, kwargs, name = _extract_name_arg(args, kwargs, name_index)\n if args:\n x, args = (args[0], args[1:])\n else:\n x = kwargs.pop(x_name)\n if need_to_bind_api_args:\n tensor_api = lambda v: api(v, *args, **kwargs)\n else:\n 
tensor_api = api\n if name is None:\n return elementwise_api_handler(tensor_api, x)\n else:\n with ops.name_scope(name, None, [x]):\n return elementwise_api_handler(tensor_api, x)\n dispatch_target.__name__ = 'elementwise_dispatch_target_for_' + api.__name__\n dispatch_target.__qualname__ = dispatch_target.__name__\n target_list = _ELEMENTWISE_API_TARGETS.setdefault((x_type,), [])\n target_list.append((api, dispatch_target))", + "docstring": "Registers a unary elementwise handler as a dispatcher for a given API.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", + "ast_data": "FunctionDef name:_add_dispatch_for_unary_elementwise_api arg:api arg:x_type arg:elementwise_api_handler arguments arg arg arg Assign Call Assign Call Assign Call Assign BoolOp Compare Call Compare FunctionDef name:dispatch_target arguments arg arg Assign Call If Assign Assign Call If Assign arguments arg Call Assign If Compare Return return:yes Call With Call Return return:yes Call Call Assign Assign Assign Call Call" + }, + { + "library": "numpy", + "name": "herme2poly", + "source_code": "def herme2poly(c):\n from .polynomial import polyadd, polymulx, polysub\n [c] = pu.as_series([c])\n n = len(c)\n if n == 1:\n return c\n if n == 2:\n return c\n else:\n c0 = c[-2]\n c1 = c[-1]\n for i in range(n - 1, 1, -1):\n tmp = c0\n c0 = polysub(c[i - 2], c1 * (i - 1))\n c1 = polyadd(tmp, polymulx(c1))\n return polyadd(c0, polymulx(c1))", + "docstring": "Convert a Hermite series to a polynomial. Convert an array representing the coefficients of a Hermite series, ordered from lowest degree to highest, to an array of the coefficients of the equivalent polynomial (relative to the \"standard\" basis) ordered from lowest to highest degree. Parameters ---------- c : array_like 1-D array containing the Hermite series coefficients, ordered from lowest order term to highest. Returns ------- pol : ndarray 1-D array containing the coefficients of the equivalent polynomial (relative to the \"standard\" basis) ordered from lowest order term to highest. See Also -------- poly2herme Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. 
Examples -------- >>> from numpy.polynomial.hermite_e import herme2poly >>> herme2poly([ 2., 10., 2., 3.]) array([0., 1., 2., 3.])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", + "ast_data": "FunctionDef name:herme2poly arg:c arguments arg Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes Assign Assign For Call Assign Assign Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "compute_jac_scale", + "source_code": "def compute_jac_scale(J, scale_inv_old=None):\n if issparse(J):\n scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel() ** 0.5\n else:\n scale_inv = np.sum(J ** 2, axis=0) ** 0.5\n if scale_inv_old is None:\n scale_inv[scale_inv == 0] = 1\n else:\n scale_inv = np.maximum(scale_inv, scale_inv_old)\n return (1 / scale_inv, scale_inv)", + "docstring": "Compute variables scale based on the Jacobian matrix.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py", + "ast_data": "FunctionDef name:compute_jac_scale arg:J arg:scale_inv_old arguments arg arg If Call Assign Call Call Call Call Assign Call If Compare Assign Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "Pool", + "source_code": "class Pool(multiprocessing.pool.Pool):\n\n def _setup_queues(self):\n self._inqueue = SimpleQueue()\n self._outqueue = SimpleQueue()\n self._quick_put = self._inqueue._writer.send\n self._quick_get = self._outqueue._reader.recv\n\n def _repopulate_pool(self):\n for _ in range(self._processes - len(self._pool)):\n args = (self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild)\n if hasattr(self, '_wrap_exception'):\n args += (self._wrap_exception,)\n w = self.Process(target=clean_worker, args=args)\n self._pool.append(w)\n w.name = w.name.replace('Process', 'PoolWorker')\n w.daemon = True\n w.start()\n util.debug('added worker')", + "docstring": "Pool implementation which uses our version of SimpleQueue. This lets us pass tensors in shared memory across processes instead of serializing the underlying data.", + "type": "class", + "file_path": "pytorch\\torch\\multiprocessing\\pool.py", + "ast_data": "ClassDef name:Pool FunctionDef name:_setup_queues arg:self arguments arg Assign Call Assign Call Assign Assign FunctionDef name:_repopulate_pool arg:self arguments arg For Call Call Assign If Call Assign Call Call Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "unpack", + "source_code": "def unpack(self, parallel_tensor):\n self._assert_eager()\n unpacked_components = [[] for _ in range(len(self.components))]\n with ops.device(self._name):\n parallel_tensor = variable_utils.convert_variables_to_tensors(parallel_tensor)\n for tensor in nest.flatten(parallel_tensor, expand_composites=True):\n for accumulator, unpacked_tensor in zip(unpacked_components, self._unpack_tensor(tensor)):\n accumulator.append(unpacked_tensor)\n return [nest.pack_sequence_as(parallel_tensor, unpacked, expand_composites=True) for unpacked in unpacked_components]", + "docstring": "Unpack a parallel tensor into its components. Args: parallel_tensor: A tensor, composite tensor, or of such placed on the ParallelDevice. Passing objects reads their value, it does not share a mutable reference between the packed and unpacked forms. 
Returns: A list with the same length as each with the same structure as , containing component tensors.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\parallel_device\\parallel_device.py", + "ast_data": "FunctionDef name:unpack arg:self arg:parallel_tensor arguments arg arg Call Assign Call Call With Call Assign Call For Call For Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "check_exception_table", + "source_code": "def check_exception_table(tab: list[ExceptionTableEntry]) -> None:\n for i in range(len(tab) - 1):\n assert tab[i].start <= tab[i].end and tab[i].end < tab[i + 1].start and (tab[i + 1].start <= tab[i + 1].end)", + "docstring": "Verifies that a list of ExceptionTableEntries will make a well-formed jump table: entries are non-empty, sorted, and do not overlap.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "FunctionDef name:check_exception_table arg:tab arguments arg For Call Call BoolOp Compare Compare Compare" + }, + { + "library": "pygame", + "name": "OrderedUpdates", + "source_code": "class OrderedUpdates(RenderUpdates):\n\n def __init__(self, *sprites):\n self._spritelist = []\n RenderUpdates.__init__(self, *sprites)\n\n def sprites(self):\n return self._spritelist.copy()\n\n def add_internal(self, sprite, layer=None):\n RenderUpdates.add_internal(self, sprite)\n self._spritelist.append(sprite)\n\n def remove_internal(self, sprite):\n RenderUpdates.remove_internal(self, sprite)\n self._spritelist.remove(sprite)", + "docstring": "RenderUpdates class that draws Sprites in order of addition pygame.sprite.OrderedUpdates(*sprites): return OrderedUpdates This class derives from pygame.sprite.RenderUpdates(). It maintains the order in which the Sprites were added to the Group for rendering. 
This makes adding and removing Sprites from the Group a little slower than regular Groups.", + "type": "class", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "ClassDef name:OrderedUpdates FunctionDef name:__init__ arg:self arguments arg arg Assign Call FunctionDef name:sprites arg:self arguments arg Return return:yes Call FunctionDef name:add_internal arg:self arg:sprite arg:layer arguments arg arg arg Call Call FunctionDef name:remove_internal arg:self arg:sprite arguments arg arg Call Call" + }, + { + "library": "pandas", + "name": "IndexType", + "source_code": "class IndexType(types.Type):\n\n def __init__(self, dtype, layout, pyclass: any) -> None:\n self.pyclass = pyclass\n name = f'index({dtype}, {layout})'\n self.dtype = dtype\n self.layout = layout\n super().__init__(name)\n\n @property\n def key(self):\n return (self.pyclass, self.dtype, self.layout)\n\n @property\n def as_array(self):\n return types.Array(self.dtype, 1, self.layout)\n\n def copy(self, dtype=None, ndim: int=1, layout=None) -> Self:\n assert ndim == 1\n if dtype is None:\n dtype = self.dtype\n layout = layout or self.layout\n return type(self)(dtype, layout, self.pyclass)", + "docstring": "The type class for Index objects.", + "type": "class", + "file_path": "pandas\\pandas\\core\\_numba\\extensions.py", + "ast_data": "ClassDef name:IndexType FunctionDef name:__init__ arg:self arg:dtype arg:layout arg:pyclass arguments arg arg arg arg Assign Assign Assign Assign Call Call FunctionDef name:key arg:self arguments arg Return return:yes FunctionDef name:as_array arg:self arguments arg Return return:yes Call FunctionDef name:copy arg:self arg:dtype arg:ndim arg:layout arguments arg arg arg arg Compare If Compare Assign Assign BoolOp Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_is_scalar_access", + "source_code": "def _is_scalar_access(self, key: tuple) -> bool:\n if len(key) != self.ndim:\n return False\n return all((is_integer(k) for k in key))", + "docstring": "Returns ------- bool", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:_is_scalar_access arg:self arg:key arguments arg arg If Compare Call Return return:yes Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "mark_as_unsaveable", + "source_code": "def mark_as_unsaveable(self, error_message):\n self._saveable = False\n if isinstance(error_message, str):\n error_message = [error_message]\n self._saving_errors.update(error_message)", + "docstring": "Marks this FuncGraph as unsaveable. Any attempts to export this FuncGraph will raise an error with the specified message. 
Args: error_message: List or string containing the error message to be raised when saving this FuncGraph to SavedModel.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py", + "ast_data": "FunctionDef name:mark_as_unsaveable arg:self arg:error_message arguments arg arg Assign If Call Assign Call" + }, + { + "library": "pytorch", + "name": "compile_submod", + "source_code": "def compile_submod(self, input_mod, args, kwargs):\n assert len(kwargs) == 0, 'We assume only args for these modules'\n\n class WrapperModule(torch.nn.Module):\n\n def __init__(self, submod, unwrap_singleton_tuple) -> None:\n super().__init__()\n self.submod = submod\n self.unwrap_singleton_tuple = unwrap_singleton_tuple\n\n def forward(self, *args):\n x = self.submod(*args)\n if self.unwrap_singleton_tuple and isinstance(x, (tuple, list)):\n return x[0]\n return x\n unwrap_singleton_tuple = False\n for sn in input_mod.graph.nodes:\n if sn.op == 'output':\n if not isinstance(sn.args[0], tuple):\n unwrap_singleton_tuple = True\n sn.args = (sn.args,)\n input_mod.recompile()\n input_mod.compile_subgraph_reason = GraphCompileReason('DDPOptimizer intentional graph-break (See Note [DDPOptimizer]). Set `torch._dynamo.config.optimize_ddp = False` to disable.', [traceback.FrameSummary(__file__, 0, DDPOptimizer)])\n wrapper = WrapperModule(self.compiler(input_mod, args), unwrap_singleton_tuple)\n return wrapper", + "docstring": "Compile the submodule, using a wrapper to make sure its output is always a tuple, which is required by AotAutograd based compilers", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\backends\\distributed.py", + "ast_data": "FunctionDef name:compile_submod arg:self arg:input_mod arg:args arg:kwargs arguments arg arg arg arg Compare Call ClassDef name:WrapperModule FunctionDef name:__init__ arg:self arg:submod arg:unwrap_singleton_tuple arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arguments arg arg Assign Call If BoolOp Call Return return:yes Return return:yes Assign For If Compare If Call Assign Assign Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "pygame", + "name": "threadloop", + "source_code": "def threadloop(self):\n while True:\n args = self.queue.get()\n if args is STOP:\n self.queue.put(STOP)\n self.queue.task_done()\n break\n try:\n args[0](*args[1], **args[2])\n finally:\n self.queue.task_done()", + "docstring": "Loops until all of the tasks are finished.", + "type": "method", + "file_path": "pygame\\src_py\\threads\\__init__.py", + "ast_data": "FunctionDef name:threadloop arg:self arguments arg While Assign Call If Compare Call Call Try Call Call" + }, + { + "library": "tensorflow", + "name": "prefer_static_broadcast_shape", + "source_code": "def prefer_static_broadcast_shape(shape1, shape2, name='prefer_static_broadcast_shape'):\n with ops.name_scope(name, values=[shape1, shape2]):\n\n def make_shape_tensor(x):\n return ops.convert_to_tensor(x, name='shape', dtype=dtypes.int32)\n\n def get_tensor_shape(s):\n if isinstance(s, tensor_shape.TensorShape):\n return s\n s_ = tensor_util.constant_value(make_shape_tensor(s))\n if s_ is not None:\n return tensor_shape.TensorShape(s_)\n return None\n\n def get_shape_tensor(s):\n if not isinstance(s, tensor_shape.TensorShape):\n return make_shape_tensor(s)\n if s.is_fully_defined():\n return make_shape_tensor(s.as_list())\n raise ValueError('Cannot broadcast from partially defined `TensorShape`.')\n shape1_ = 
get_tensor_shape(shape1)\n shape2_ = get_tensor_shape(shape2)\n if shape1_ is not None and shape2_ is not None:\n return array_ops.broadcast_static_shape(shape1_, shape2_)\n shape1_ = get_shape_tensor(shape1)\n shape2_ = get_shape_tensor(shape2)\n return array_ops.broadcast_dynamic_shape(shape1_, shape2_)", + "docstring": "Convenience function which statically broadcasts shape when possible. Args: shape1: integer . Already converted to tensor! shape2: integer . Already converted to tensor! name: A string name to prepend to created ops. Returns: The broadcast shape, either as (if broadcast can be done statically), or as a .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py", + "ast_data": "FunctionDef name:prefer_static_broadcast_shape arg:shape1 arg:shape2 arg:name arguments arg arg arg With Call FunctionDef name:make_shape_tensor arg:x arguments arg Return return:yes Call FunctionDef name:get_tensor_shape arg:s arguments arg If Call Return return:yes Assign Call Call If Compare Return return:yes Call Return return:no FunctionDef name:get_shape_tensor arg:s arguments arg If Call Return return:yes Call If Call Return return:yes Call Call Raise Call Assign Call Assign Call If BoolOp Compare Compare Return return:yes Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "has_invalid_submodule_updates", + "source_code": "def has_invalid_submodule_updates(self) -> bool:\n return len(self.get_changed_submodules()) > 0 and 'submodule' not in self.get_title().lower() and ('submodule' not in self.get_body().lower()) and all(('submodule' not in label for label in self.get_labels()))", + "docstring": "Submodule updates in PR are invalid if submodule keyword is not mentioned in neither the title nor body/description nor in any of the labels.", + "type": "method", + "file_path": "pytorch\\.github\\scripts\\trymerge.py", + "ast_data": "FunctionDef name:has_invalid_submodule_updates arg:self arguments arg Return return:yes BoolOp Compare Call Call Compare Call Call Compare Call Call Call Compare Call" + }, + { + "library": "pandas", + "name": "_drop_labels_or_levels", + "source_code": "@final\ndef _drop_labels_or_levels(self, keys, axis: AxisInt=0):\n axis = self._get_axis_number(axis)\n keys = common.maybe_make_list(keys)\n invalid_keys = [k for k in keys if not self._is_label_or_level_reference(k, axis=axis)]\n if invalid_keys:\n raise ValueError(f'The following keys are not valid labels or levels for axis {axis}: {invalid_keys}')\n levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]\n labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]\n dropped = self.copy(deep=False)\n if axis == 0:\n if levels_to_drop:\n dropped.reset_index(levels_to_drop, drop=True, inplace=True)\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=1, inplace=True)\n else:\n if levels_to_drop:\n if isinstance(dropped.columns, MultiIndex):\n dropped.columns = dropped.columns.droplevel(levels_to_drop)\n else:\n dropped.columns = default_index(dropped.columns.size)\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=0, inplace=True)\n return dropped", + "docstring": "Drop labels and/or levels for the given . For each key in : - (axis=0): If key matches a column label then drop the column. Otherwise if key matches an index level then drop the level. - (axis=1): If key matches an index label then drop the row. Otherwise if key matches a column level then drop the level. 
Parameters ---------- keys : str or list of str labels or levels to drop axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- dropped: DataFrame Raises ------ ValueError if any match neither a label nor a level", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:_drop_labels_or_levels arg:self arg:keys arg:axis arguments arg arg arg Assign Call Assign Call Assign Call If Raise Call Assign Call Assign Call Assign Call If Compare If Call If Call If If Call Assign Call Assign Call If Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "__call__", + "source_code": "def __call__(self, *args, **kwargs):\n if args:\n raise TypeError('The %r Tool does not accept positional arguments; you must use keyword arguments.' % self._name)\n\n def tool_decorator(f):\n if not hasattr(f, '_cp_config'):\n f._cp_config = {}\n subspace = self.namespace + '.' + self._name + '.'\n f._cp_config[subspace + 'on'] = True\n for k, v in kwargs.items():\n f._cp_config[subspace + k] = v\n return f\n return tool_decorator", + "docstring": "Compile-time decorator (turn on the tool in config). For example:: @expose @tools.proxy() def whats_my_base(self): return cherrypy.request.base", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cptools.py", + "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg If Raise Call FunctionDef name:tool_decorator arg:f arguments arg If Call Assign Assign Assign For Call Assign Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "default_config_dict", + "source_code": "def default_config_dict(name=None, parent_name=None, local_path=None):\n import warnings\n warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of deprecated default_config_dict(%r,%r,%r)' % (name, parent_name, local_path, name, parent_name, local_path), stacklevel=2)\n c = Configuration(name, parent_name, local_path)\n return c.todict()", + "docstring": "Return a configuration dictionary for usage in configuration() function defined in file setup_.py.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\misc_util.py", + "ast_data": "FunctionDef name:default_config_dict arg:name arg:parent_name arg:local_path arguments arg arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_get_transformer_list", + "source_code": "def _get_transformer_list(estimators):\n transformers, columns = zip(*estimators)\n names, _ = zip(*_name_estimators(transformers))\n transformer_list = list(zip(names, transformers, columns))\n return transformer_list", + "docstring": "Construct (name, trans, column) tuples from list", + "type": "function", + "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py", + "ast_data": "FunctionDef name:_get_transformer_list arg:estimators arguments arg Assign Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "compute_specificity_at_sensitivity", + "source_code": "def compute_specificity_at_sensitivity(tp, tn, fp, fn, name):\n sensitivities = math_ops.divide(tp, tp + fn + kepsilon)\n min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))\n indices_at_minval = math_ops.equal(math_ops.abs(sensitivities - sensitivity), min_val)\n indices_at_minval = math_ops.cast(indices_at_minval, dtypes.int64)\n indices_at_minval = math_ops.cumsum(indices_at_minval)\n tf_index = math_ops.argmax(indices_at_minval, 0)\n tf_index 
= math_ops.cast(tf_index, dtypes.int32)\n return math_ops.divide(tn[tf_index], tn[tf_index] + fp[tf_index] + kepsilon, name)", + "docstring": "Computes the specificity at the given sensitivity. Args: tp: True positives. tn: True negatives. fp: False positives. fn: False negatives. name: The name of the operation. Returns: The specificity using the aggregated values.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", + "ast_data": "FunctionDef name:compute_specificity_at_sensitivity arg:tp arg:tn arg:fp arg:fn arg:name arguments arg arg arg arg arg Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_svqb", + "source_code": "def _get_svqb(self, U: Tensor, drop: bool, tau: float) -> Tensor:\n if torch.numel(U) == 0:\n return U\n UBU = _utils.qform(self.B, U)\n d = UBU.diagonal(0, -2, -1)\n nz = torch.where(abs(d) != 0.0)\n assert len(nz) == 1, nz\n if len(nz[0]) < len(d):\n U = U[:, nz[0]]\n if torch.numel(U) == 0:\n return U\n UBU = _utils.qform(self.B, U)\n d = UBU.diagonal(0, -2, -1)\n nz = torch.where(abs(d) != 0.0)\n assert len(nz[0]) == len(d)\n d_col = (d ** (-0.5)).reshape(d.shape[0], 1)\n DUBUD = UBU * d_col * d_col.mT\n E, Z = _utils.symeig(DUBUD)\n t = tau * abs(E).max()\n if drop:\n keep = torch.where(E > t)\n assert len(keep) == 1, keep\n E = E[keep[0]]\n Z = Z[:, keep[0]]\n d_col = d_col[keep[0]]\n else:\n E[torch.where(E < t)[0]] = t\n return torch.matmul(U * d_col.mT, Z * E ** (-0.5))", + "docstring": "Return B-orthonormal U. .. note:: When is then is based on the Algorithm 4 from [DuerschPhD2015] that is a slight modification of the corresponding algorithm introduced in [StathopolousWu2002]. Args: U (Tensor) : initial approximation, size is (m, n) drop (bool) : when True, drop columns that contribution to the is small. tau (float) : positive tolerance Returns: U (Tensor) : B-orthonormal columns (:math:), size is (m, n1), where if is n1 <= n`.", + "type": "method", + "file_path": "pytorch\\torch\\_lobpcg.py", + "ast_data": "FunctionDef name:_get_svqb arg:self arg:U arg:drop arg:tau arguments arg arg arg arg If Compare Call Return return:yes Assign Call Assign Call Assign Call Compare Call Compare Call If Compare Call Call Assign If Compare Call Return return:yes Assign Call Assign Call Assign Call Compare Call Compare Call Call Assign Call Assign Assign Call Assign Call Call If Assign Call Compare Compare Call Assign Assign Assign Assign Call Compare Return return:yes Call" + }, + { + "library": "django", + "name": "render_to_kmz", + "source_code": "def render_to_kmz(*args, **kwargs):\n return HttpResponse(compress_kml(loader.render_to_string(*args, **kwargs)), content_type='application/vnd.google-earth.kmz')", + "docstring": "Compress the KML content and return as KMZ (using the correct MIME type).", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\shortcuts.py", + "ast_data": "FunctionDef name:render_to_kmz arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_dimension_tensor_conversion_function", + "source_code": "def _dimension_tensor_conversion_function(d, dtype=None, name=None, as_ref=False):\n _ = as_ref\n if d.value is None:\n raise ValueError(f'Cannot convert unknown Dimension {d} to a Tensor.')\n if dtype is not None:\n if dtype not in (dtypes.int32, dtypes.int64):\n raise TypeError(f'Cannot convert Dimension {d} to dtype {dtype}. 
Allowed dtypes are tf.int32 and tf.int64.')\n else:\n dtype = dtypes.int32\n if name is None:\n name = 'shape_as_tensor'\n return constant(d.value, dtype=dtype, name=name)", + "docstring": "Function to convert Dimension to Tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py", + "ast_data": "FunctionDef name:_dimension_tensor_conversion_function arg:d arg:dtype arg:name arg:as_ref arguments arg arg arg arg Assign If Compare Raise Call If Compare If Compare Raise Call Assign If Compare Assign Return return:yes Call" + }, + { + "library": "numpy", + "name": "Methods0DFloatInt", + "source_code": "class Methods0DFloatInt(Benchmark):\n params = [['__int__', '__float__'], [dt for dt in TYPES1 if not dt.startswith('complex')]]\n param_names = ['methods', 'npdtypes']\n timeout = 10\n\n def setup(self, methname, npdtypes):\n self.xarg = np.array(3, dtype=npdtypes)\n\n def time_ndarray__0d__(self, methname, npdtypes):\n meth = getattr(self.xarg, methname)\n meth()", + "docstring": "Zero dimension array methods", + "type": "class", + "file_path": "numpy\\benchmarks\\benchmarks\\bench_ufunc.py", + "ast_data": "ClassDef name:Methods0DFloatInt Assign Call Assign Assign FunctionDef name:setup arg:self arg:methname arg:npdtypes arguments arg arg arg Assign Call FunctionDef name:time_ndarray__0d__ arg:self arg:methname arg:npdtypes arguments arg arg arg Assign Call Call" + }, + { + "library": "pytorch", + "name": "_device_range_end", + "source_code": "def _device_range_end(range_handle: object, stream: int=0) -> None:\n _nvtx.deviceRangeEnd(range_handle, stream)", + "docstring": "Mark the end of a range for a given range_handle as soon as all the tasks on the CUDA stream are completed. Args: range_handle: an unique handle for the start range. stream (int): CUDA stream id.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\nvtx.py", + "ast_data": "FunctionDef name:_device_range_end arg:range_handle arg:stream arguments arg arg Call" + }, + { + "library": "kornia", + "name": "BinaryFocalLossWithLogits", + "source_code": "class BinaryFocalLossWithLogits(nn.Module):\n\n def __init__(self, alpha: Optional[float], gamma: float=2.0, reduction: str='none', pos_weight: Optional[Tensor]=None, weight: Optional[Tensor]=None, ignore_index: Optional[int]=-100) -> None:\n super().__init__()\n self.alpha: Optional[float] = alpha\n self.gamma: float = gamma\n self.reduction: str = reduction\n self.pos_weight: Optional[Tensor] = pos_weight\n self.weight: Optional[Tensor] = weight\n self.ignore_index: Optional[int] = ignore_index\n\n def forward(self, pred: Tensor, target: Tensor) -> Tensor:\n return binary_focal_loss_with_logits(pred, target, self.alpha, self.gamma, self.reduction, self.pos_weight, self.weight, self.ignore_index)", + "docstring": "Criterion that computes Focal loss. According to :cite:, the Focal loss is computed as follows: .. math:: \\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t) where: - :math: is the model's estimated probability for each class. Args: alpha: Weighting factor :math:. gamma: Focusing parameter :math:. reduction: Specifies the reduction to apply to the output: `(num\\_of\\_classes,)(num\\_of\\_classes,)(N, C, *)(N, C, *)` where each value is between 0 and 1. 
Examples: >>> C = 3 # num_classes >>> pred = torch.randn(1, C, 5, requires_grad=True) >>> target = torch.randint(2, (1, C, 5)) >>> kwargs = {\"alpha\": 0.25, \"gamma\": 2.0, \"reduction\": 'mean'} >>> criterion = BinaryFocalLossWithLogits(**kwargs) >>> output = criterion(pred, target) >>> output.backward()", + "type": "class", + "file_path": "kornia\\kornia\\losses\\focal.py", + "ast_data": "ClassDef name:BinaryFocalLossWithLogits FunctionDef name:__init__ arg:self arg:alpha arg:gamma arg:reduction arg:pos_weight arg:weight arg:ignore_index arguments arg arg arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:pred arg:target arguments arg arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "identity", + "source_code": "@classmethod\ndef identity(cls, batch_size: Optional[int]=None, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> So3:\n return cls(Quaternion.identity(batch_size, device, dtype))", + "docstring": "Create a So3 group representing an identity rotation. Args: batch_size: the batch size of the underlying data. device: device to place the result on. dtype: dtype of the result. Example: >>> s = So3.identity() >>> s Parameter containing: tensor([1., 0., 0., 0.], requires_grad=True) >>> s = So3.identity(batch_size=2) >>> s Parameter containing: tensor([[1., 0., 0., 0.], [1., 0., 0., 0.]], requires_grad=True)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py", + "ast_data": "FunctionDef name:identity arg:cls arg:batch_size arg:device arg:dtype arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_TextLineDataset", + "source_code": "class _TextLineDataset(dataset_ops.DatasetSource):\n\n def __init__(self, filenames, compression_type=None, buffer_size=None, name=None):\n self._filenames = filenames\n self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string)\n self._buffer_size = convert.optional_param_to_tensor('buffer_size', buffer_size, argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES)\n self._name = name\n variant_tensor = gen_dataset_ops.text_line_dataset(self._filenames, self._compression_type, self._buffer_size, metadata=self._metadata.SerializeToString())\n super(_TextLineDataset, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return tensor_spec.TensorSpec([], dtypes.string)", + "docstring": "A comprising records from one or more text files.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py", + "ast_data": "ClassDef name:_TextLineDataset FunctionDef name:__init__ arg:self arg:filenames arg:compression_type arg:buffer_size arg:name arguments arg arg arg arg arg Assign Assign Call Assign Call Assign Assign Call Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "size", + "source_code": "def size(self, name=None):\n with ops.name_scope(name, '%s_Size' % self.name):\n if self._table:\n tsize = self._table.size()\n else:\n tsize = ops.convert_to_tensor(0, dtype=dtypes.int64)\n return tsize + self._num_oov_buckets", + "docstring": "Compute the number of elements in this table.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg With Call If Assign Call Assign Call Return return:yes" + }, + { + "library": 
"pandas", + "name": "_combine", + "source_code": "def _combine(self, blocks: list[Block], index: Index | None=None) -> Self:\n if len(blocks) == 0:\n if self.ndim == 2:\n if index is not None:\n axes = [self.items[:0], index]\n else:\n axes = [self.items[:0]] + self.axes[1:]\n return self.make_empty(axes)\n return self.make_empty()\n indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))\n inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])\n new_blocks: list[Block] = []\n for b in blocks:\n nb = b.copy(deep=False)\n nb.mgr_locs = BlockPlacement(inv_indexer[nb.mgr_locs.indexer])\n new_blocks.append(nb)\n axes = list(self.axes)\n if index is not None:\n axes[-1] = index\n axes[0] = self.items.take(indexer)\n return type(self).from_blocks(new_blocks, axes)", + "docstring": "return a new manager with the blocks", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:_combine arg:self arg:blocks arg:index arguments arg arg arg If Compare Call If Compare If Compare Assign Assign Return return:yes Call Return return:yes Call Assign Call Call Assign Call For Assign Call Assign Call Call Assign Call If Compare Assign Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "call_hook", + "source_code": "def call_hook(hook: Callable[..., Optional[torch.Tensor]], *args: Any, **kwargs: Any) -> torch.Tensor:\n result = hook(*args)\n if result is None:\n return args[0]\n elif kwargs.get('hook_type') == 'post_acc_grad_hook':\n raise RuntimeError('Tensor post accumulate grad hooks should return None.')\n return result", + "docstring": "Used by compiled autograd to handle hook returning None.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\external_utils.py", + "ast_data": "FunctionDef name:call_hook arg:hook arguments arg arg arg Assign Call If Compare Return return:yes If Compare Call Raise Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "partial_fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y=None):\n first_pass = not hasattr(self, 'components_')\n X = validate_data(self, X, accept_sparse='csr', dtype=np.float64, reset=first_pass)\n if not hasattr(self, 'random_state_'):\n self.random_state_ = check_random_state(self.random_state)\n if not hasattr(self, 'components_'):\n self.components_ = np.asarray(self.random_state_.normal(0, 0.01, (self.n_components, X.shape[1])), order='F')\n self._n_features_out = self.components_.shape[0]\n if not hasattr(self, 'intercept_hidden_'):\n self.intercept_hidden_ = np.zeros(self.n_components)\n if not hasattr(self, 'intercept_visible_'):\n self.intercept_visible_ = np.zeros(X.shape[1])\n if not hasattr(self, 'h_samples_'):\n self.h_samples_ = np.zeros((self.batch_size, self.n_components))\n self._fit(X, self.random_state_)", + "docstring": "Fit the model to the partial segment of the data X. Parameters ---------- X : ndarray of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). 
Returns ------- self : BernoulliRBM The fitted model.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neural_network\\_rbm.py", + "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Call If Call Assign Call If Call Assign Call Call Assign If Call Assign Call If Call Assign Call If Call Assign Call Call Call" + }, + { + "library": "django", + "name": "ObjectNotUpdated", + "source_code": "class ObjectNotUpdated(Exception):\n pass", + "docstring": "The updated object no longer exists.", + "type": "class", + "file_path": "django\\django\\core\\exceptions.py", + "ast_data": "ClassDef name:ObjectNotUpdated" + }, + { + "library": "scrapy", + "name": "update", + "source_code": "def update(self, values: _SettingsInputT, priority: int | str='project') -> None:\n self._assert_mutability()\n if isinstance(values, str):\n values = cast(dict[_SettingsKeyT, Any], json.loads(values))\n if values is not None:\n if isinstance(values, BaseSettings):\n for name, value in values.items():\n self.set(name, value, cast(int, values.getpriority(name)))\n else:\n for name, value in values.items():\n self.set(name, value, priority)", + "docstring": "Store key/value pairs with a given priority. This is a helper function that calls :meth: for every item of `~scrapy.settings.BaseSettings~scrapy.settings.BaseSettings~scrapy.settings.SETTINGS_PRIORITIES` or an integer :type priority: str or int", + "type": "method", + "file_path": "scrapy\\scrapy\\settings\\__init__.py", + "ast_data": "FunctionDef name:update arg:self arg:values arg:priority arguments arg arg arg Call If Call Assign Call Call If Compare If Call For Call Call Call Call For Call Call" + }, + { + "library": "scipy", + "name": "dot", + "source_code": "def dot(self, x):\n if isinstance(x, LinearOperator):\n return _ProductLinearOperator(self, x)\n elif np.isscalar(x):\n return _ScaledLinearOperator(self, x)\n else:\n if not issparse(x) and (not is_pydata_spmatrix(x)):\n x = np.asarray(x)\n if x.ndim == 1 or (x.ndim == 2 and x.shape[1] == 1):\n return self.matvec(x)\n elif x.ndim == 2:\n return self.matmat(x)\n else:\n raise ValueError(f'expected 1-d or 2-d array or matrix, got {x!r}')", + "docstring": "Matrix-matrix or matrix-vector multiplication. Parameters ---------- x : array_like 1-d or 2-d array, representing a vector or matrix. Returns ------- Ax : array 1-d or 2-d array (depending on the shape of x) that represents the result of applying this linear operator on x.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py", + "ast_data": "FunctionDef name:dot arg:self arg:x arguments arg arg If Call Return return:yes Call If Call Return return:yes Call If BoolOp Call Call Assign Call If BoolOp Compare BoolOp Compare Compare Return return:yes Call If Compare Return return:yes Call Raise Call" + }, + { + "library": "tensorflow", + "name": "scale_regularization_loss", + "source_code": "@tf_export('nn.scale_regularization_loss')\n@dispatch.add_dispatch_support\ndef scale_regularization_loss(regularization_loss):\n if distribute_lib.has_strategy() and distribute_lib.in_cross_replica_context():\n raise RuntimeError('You are calling `scale_regularization_loss` in cross replica context, while it was expected to be called in replica context.')\n num_replicas = distribute_lib.get_strategy().num_replicas_in_sync\n return math_ops.reduce_sum(regularization_loss) / num_replicas", + "docstring": "Scales the sum of the given regularization losses by number of replicas. 
Usage with distribution strategy and custom training loop: Args: regularization_loss: Regularization loss. Returns: Scalar loss value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl_distribute.py", + "ast_data": "FunctionDef name:scale_regularization_loss arg:regularization_loss arguments arg If BoolOp Call Call Raise Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_GreaterThanEq", + "source_code": "class _GreaterThanEq(Constraint):\n\n def __init__(self, lower_bound):\n self.lower_bound = lower_bound\n super().__init__()\n\n def check(self, value):\n return self.lower_bound <= value\n\n def __repr__(self):\n fmt_string = self.__class__.__name__[1:]\n fmt_string += f'(lower_bound={self.lower_bound})'\n return fmt_string", + "docstring": "Constrain to a real half line .", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\constraints.py", + "ast_data": "ClassDef name:_GreaterThanEq FunctionDef name:__init__ arg:self arg:lower_bound arguments arg arg Assign Call Call FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Compare FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "UsersPage", + "source_code": "class UsersPage:\n\n @cherrypy.expose\n def index(self):\n return '\\n Remi Delon
\\n Hendrik Mans
\\n Lorenzo Lamas
\\n '\n\n @cherrypy.expose\n def default(self, user):\n if user == 'remi':\n out = 'Remi Delon, CherryPy lead developer'\n elif user == 'hendrik':\n out = 'Hendrik Mans, CherryPy co-developer & crazy German'\n elif user == 'lorenzo':\n out = 'Lorenzo Lamas, famous actor and singer!'\n else:\n out = 'Unknown user. :-('\n return '%s (back)' % out", + "docstring": "The users app.", + "type": "class", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut06_default_method.py", + "ast_data": "ClassDef name:UsersPage FunctionDef name:index arg:self arguments arg Return return:yes FunctionDef name:default arg:self arg:user arguments arg arg If Compare Assign If Compare Assign If Compare Assign Assign Return return:yes" + }, + { + "library": "django", + "name": "_i18n_cache_key_suffix", + "source_code": "def _i18n_cache_key_suffix(request, cache_key):\n if settings.USE_I18N:\n cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())\n if settings.USE_TZ:\n cache_key += '.%s' % get_current_timezone_name()\n return cache_key", + "docstring": "If necessary, add the current locale or time zone to the cache key.", + "type": "function", + "file_path": "django\\django\\utils\\cache.py", + "ast_data": "FunctionDef name:_i18n_cache_key_suffix arg:request arg:cache_key arguments arg arg If Call Call If Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_constrained_layout_pads", + "source_code": "def get_constrained_layout_pads(self, relative=False):\n return self._parent.get_constrained_layout_pads(relative=relative)", + "docstring": "Get padding for `constrainedlayout_guideTrue`, then convert from inches to figure relative.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:get_constrained_layout_pads arg:self arg:relative arguments arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "StrOptions", + "source_code": "class StrOptions(Options):\n\n def __init__(self, options, *, deprecated=None):\n super().__init__(type=str, options=options, deprecated=deprecated)", + "docstring": "Constraint representing a finite set of strings. Parameters ---------- options : set of str The set of valid strings. deprecated : set of str or None, default=None A subset of the to mark as deprecated in the string representation of the constraint.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py", + "ast_data": "ClassDef name:StrOptions FunctionDef name:__init__ arg:self arg:options arguments arg arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "xla_intermediates", + "source_code": "@property\ndef xla_intermediates(self):\n return self._xla_intermediates", + "docstring": "Raw intermediates captured from the forward graph if XLA is enabled.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py", + "ast_data": "FunctionDef name:xla_intermediates arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_to_tensor", + "source_code": "def _to_tensor(x, dtype):\n return tensor_conversion.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)", + "docstring": "Convert the input to a tensor of type . Args: x: An object to be converted (numpy array, list, tensors). dtype: The destination type. 
Returns: A tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:_to_tensor arg:x arg:dtype arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "parse", + "source_code": "def parse(version: str) -> 'Version':\n return Version(version)", + "docstring": "Parse the given version string. >>> parse('1.0.dev1') :param version: The version string to parse. :raises InvalidVersion: When the version string is not a valid version.", + "type": "function", + "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py", + "ast_data": "FunctionDef name:parse arg:version arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "_str_term_ascii", + "source_code": "@classmethod\ndef _str_term_ascii(cls, i, arg_str):\n if cls.basis_name is None:\n raise NotImplementedError('Subclasses must define either a basis_name, or override _str_term_ascii(cls, i, arg_str)')\n return f' {cls.basis_name}_{i}({arg_str})'", + "docstring": "String representation of a single polynomial term using ** and _ to represent superscripts and subscripts, respectively.", + "type": "method", + "file_path": "numpy\\numpy\\polynomial\\_polybase.py", + "ast_data": "FunctionDef name:_str_term_ascii arg:cls arg:i arg:arg_str arguments arg arg arg If Compare Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "is_closed", + "source_code": "def is_closed(self, name=None):\n if name is None:\n name = '%s_Is_Closed' % self._name\n if self._queue_ref.dtype == _dtypes.resource:\n return gen_data_flow_ops.queue_is_closed_v2(self._queue_ref, name=name)\n else:\n return gen_data_flow_ops.queue_is_closed_(self._queue_ref, name=name)", + "docstring": "Returns true if queue is closed. This operation returns true if the queue is closed and false if the queue is open. >>> q = tf.queue.FIFOQueue(capacity=3, dtypes=tf.int32) >>> q.is_closed() Args: name: A name for the operation (optional). Returns: True if the queue is closed and false if the queue is open.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", + "ast_data": "FunctionDef name:is_closed arg:self arg:name arguments arg arg If Compare Assign If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "in_eager_mode", + "source_code": "def in_eager_mode():\n return executing_eagerly()", + "docstring": "Use executing_eagerly() instead. 
This function will be removed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:in_eager_mode arguments Return return:yes Call" + }, + { + "library": "pandas", + "name": "equal_levels", + "source_code": "def equal_levels(self, other: MultiIndex) -> bool:\n if self.nlevels != other.nlevels:\n return False\n for i in range(self.nlevels):\n if not self.levels[i].equals(other.levels[i]):\n return False\n return True", + "docstring": "Return True if the levels of both MultiIndex objects are the same", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:equal_levels arg:self arg:other arguments arg arg If Compare Return return:yes For Call If Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_x", + "source_code": "def set_x(self, x):\n self._x0 = x\n self.stale = True", + "docstring": "Set the left coordinate of the rectangle.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:set_x arg:self arg:x arguments arg arg Assign Assign" + }, + { + "library": "pytorch", + "name": "extract_tensor_metadata_for_cache_key", + "source_code": "def extract_tensor_metadata_for_cache_key(t: Tensor) -> TensorMetadata:\n meta = extract_tensor_metadata(t)\n if not hasattr(t, '_is_inductor_static'):\n meta = dataclasses.replace(meta, storage_offset=0, storage_bytes=None)\n return meta", + "docstring": "Extracts the tensor metadata and removes fields of the TensorMetadata that are not needed for caching", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\codecache.py", + "ast_data": "FunctionDef name:extract_tensor_metadata_for_cache_key arg:t arguments arg Assign Call If Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "get_versions_from_code", + "source_code": "def get_versions_from_code() -> dict[str, str]:\n install_map = _optional.INSTALL_MAPPING\n inverse_install_map = {v: k for k, v in install_map.items()}\n versions = _optional.VERSIONS\n for item in EXCLUDE_DEPS:\n item = inverse_install_map.get(item, item)\n versions.pop(item, None)\n return {install_map.get(k, k).casefold(): v for k, v in versions.items()}", + "docstring": "Min versions for checking within pandas code.", + "type": "function", + "file_path": "pandas\\scripts\\validate_min_versions_in_sync.py", + "ast_data": "FunctionDef name:get_versions_from_code arguments Assign Assign Call Assign For Assign Call Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, layout, inputs, make_kernel_render, mutated_inputs: Optional[Iterable[IRNode]]=None, allowed_prologue_inps: Optional[OrderedSet[str]]=None) -> None:\n super().__init__(layout, inputs, make_kernel_render)\n self.mutated_inputs = mutated_inputs\n self.outputs: list[Buffer] = [self]\n if mutated_inputs is not None:\n allowed_set = (torch.ops.higher_order.flex_attention, torch.ops.higher_order.flex_attention_backward)\n current_node = V.graph.current_node.target\n assert current_node in allowed_set, f'Mutated inputs are only allowed for {allowed_set} but got {current_node}'\n device = self.inputs[0].get_device()\n self.outputs += [MutationOutput(NoneLayout(device=device), buf, self) for buf in mutated_inputs]\n self.allowed_prologue_inps = allowed_prologue_inps if allowed_prologue_inps else OrderedSet()\n self.subgraph_inps: 
Optional[list[Optional[Union[IRNode, sympy.Expr]]]] = None\n self.subgraph_outs: Optional[list[Optional[IRNode]]] = None", + "docstring": "NOTE:[TritonTemplates with multiple outputs] We want the ability for TritonTemplates to output multiple tensors. Triton kernels have no notion of outputs and this is done by creating tensors that are then mutated by the kernel. Currenlty our STORE_OUTPUT codegen doesn't support creating multinode outputs for triton templates. We work around this by creating an extra input buffer during the lowering and we mark them as mutated inputs.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:layout arg:inputs arg:make_kernel_render arg:mutated_inputs arg:allowed_prologue_inps arguments arg arg arg arg arg arg Call Call Assign If Compare Assign Assign Compare Assign Call Call Call Assign Call" + }, + { + "library": "pytorch", + "name": "ConvBnReLU1d", + "source_code": "class ConvBnReLU1d(_FusedModule):\n\n def __init__(self, conv, bn, relu):\n assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(bn) == BatchNorm1d and (type_before_parametrizations(relu) == ReLU), f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}{type_before_parametrizations(relu)}'\n super().__init__(conv, bn, relu)", + "docstring": "This is a sequential container which calls the Conv 1d, Batch Norm 1d, and ReLU modules. During quantization this will be replaced with the corresponding fused module.", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py", + "ast_data": "ClassDef name:ConvBnReLU1d FunctionDef name:__init__ arg:self arg:conv arg:bn arg:relu arguments arg arg arg arg BoolOp Compare Call Compare Call Compare Call Call Call Call Call Call" + }, + { + "library": "sphinx", + "name": "parse_stop_word", + "source_code": "def parse_stop_word(source: str) -> set[str]:\n result: set[str] = set()\n for line in source.splitlines():\n line = line.split('|')[0]\n result.update(line.split())\n return result", + "docstring": "Collect the stopwords from a snowball style word list: .. code:: text list of space separated stop words | optional comment", + "type": "function", + "file_path": "sphinx\\sphinx\\search\\__init__.py", + "ast_data": "FunctionDef name:parse_stop_word arg:source arguments arg Call For Call Assign Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_handle_toggle", + "source_code": "def _handle_toggle(self, tool, canvasevent, data):\n radio_group = tool.radio_group\n if radio_group is None:\n if tool.name in self._toggled[None]:\n self._toggled[None].remove(tool.name)\n else:\n self._toggled[None].add(tool.name)\n return\n if self._toggled[radio_group] == tool.name:\n toggled = None\n elif self._toggled[radio_group] is None:\n toggled = tool.name\n else:\n self.trigger_tool(self._toggled[radio_group], self, canvasevent, data)\n toggled = tool.name\n self._toggled[radio_group] = toggled", + "docstring": "Toggle tools, need to untoggle prior to using other Toggle tool. Called from trigger_tool. Parameters ---------- tool : canvasevent : Event Original Canvas event or None. 
data : object Extra data to pass to the tool when triggering.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py", + "ast_data": "FunctionDef name:_handle_toggle arg:self arg:tool arg:canvasevent arg:data arguments arg arg arg arg Assign If Compare If Compare Call Call Return return:no If Compare Assign If Compare Assign Call Assign Assign" + }, + { + "library": "pytorch", + "name": "get_default_dynamic_sparse_quant_module_mappings", + "source_code": "def get_default_dynamic_sparse_quant_module_mappings() -> dict[Callable, Any]:\n return DEFAULT_DYNAMIC_SPARSE_QUANT_MODULE_MAPPINGS", + "docstring": "Get module mapping for post training dynamic sparse quantization", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py", + "ast_data": "FunctionDef name:get_default_dynamic_sparse_quant_module_mappings arguments Return return:yes" + }, + { + "library": "tensorflow", + "name": "map_subgraph", + "source_code": "def map_subgraph(init_tensor, sources, disallowed_placeholders, visited_ops, op_outputs, add_sources):\n ops_to_visit = [_as_operation(init_tensor)]\n extra_sources = object_identity.ObjectIdentitySet()\n while ops_to_visit:\n op = ops_to_visit.pop()\n if op in visited_ops:\n continue\n visited_ops.add(op)\n should_raise = False\n if disallowed_placeholders is not None and op in disallowed_placeholders:\n should_raise = True\n elif op.type == 'Placeholder':\n if disallowed_placeholders is None and (not add_sources):\n should_raise = True\n extra_sources.update(op.outputs)\n if should_raise:\n raise UnliftableError('Unable to lift tensor %s because it depends transitively on placeholder %s via at least one path, e.g.: %s' % (repr(init_tensor), repr(op), show_path(op, init_tensor, sources)))\n for inp in graph_inputs(op):\n op_outputs[inp].add(op)\n if inp not in visited_ops and inp not in (sources or extra_sources):\n ops_to_visit.append(inp)\n return extra_sources", + "docstring": "Walk a Graph and capture the subgraph between init_tensor and sources. Note: This function mutates visited_ops and op_outputs. Args: init_tensor: A Tensor or Operation where the subgraph terminates. sources: A set of Tensors where subgraph extraction should stop. disallowed_placeholders: An optional set of ops which may not appear in the lifted graph. Defaults to all placeholders. visited_ops: A set of operations which were visited in a prior pass. op_outputs: A defaultdict containing the outputs of an op which are to be copied into the new subgraph. add_sources: A boolean indicating whether placeholders which are not in sources should be allowed. Returns: The set of placeholders upon which init_tensor depends and are not in sources. 
Raises: UnliftableError: if init_tensor depends on a placeholder which is not in sources and add_sources is False.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\op_selector.py", + "ast_data": "FunctionDef name:map_subgraph arg:init_tensor arg:sources arg:disallowed_placeholders arg:visited_ops arg:op_outputs arg:add_sources arguments arg arg arg arg arg arg Assign Call Assign Call While Assign Call If Compare Call Assign If BoolOp Compare Compare Assign If Compare If BoolOp Compare Assign Call If Raise Call Call Call Call For Call Call If BoolOp Compare Compare BoolOp Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "NullLocator", + "source_code": "class NullLocator(Locator):\n\n def __call__(self):\n return self.tick_values(None, None)\n\n def tick_values(self, vmin, vmax):\n return []", + "docstring": "Place no ticks.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "ClassDef name:NullLocator FunctionDef name:__call__ arg:self arguments arg Return return:yes Call FunctionDef name:tick_values arg:self arg:vmin arg:vmax arguments arg arg arg Return return:no" + }, + { + "library": "scipy", + "name": "_percentile_along_axis", + "source_code": "def _percentile_along_axis(theta_hat_b, alpha):\n shape = theta_hat_b.shape[:-1]\n alpha = np.broadcast_to(alpha, shape)\n percentiles = np.zeros_like(alpha, dtype=np.float64)\n for indices, alpha_i in np.ndenumerate(alpha):\n if np.isnan(alpha_i):\n msg = 'The BCa confidence interval cannot be calculated. This problem is known to occur when the distribution is degenerate or the statistic is np.min.'\n warnings.warn(DegenerateDataWarning(msg), stacklevel=3)\n percentiles[indices] = np.nan\n else:\n theta_hat_b_i = theta_hat_b[indices]\n percentiles[indices] = np.percentile(theta_hat_b_i, alpha_i)\n return percentiles[()]", + "docstring": "with different percentile for each slice.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_resampling.py", + "ast_data": "FunctionDef name:_percentile_along_axis arg:theta_hat_b arg:alpha arguments arg arg Assign Assign Call Assign Call For Call If Call Assign Call Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "__init__", + "source_code": "def __init__(self, data: Tensor) -> None:\n super().__init__()\n self._data = Parameter(data)", + "docstring": "Construct the base class. Args: data: tensor containing the quaternion data with the sape of :math:. Example: >>> data = torch.rand(2, 4) >>> q = Quaternion(data) >>> q.shape (2, 4)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\quaternion.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:data arguments arg arg Call Call Assign Call" + }, + { + "library": "django", + "name": "ServerSideCursor", + "source_code": "class ServerSideCursor(CursorMixin, Database.client_cursor.ClientCursorMixin, Database.ServerCursor):\n pass", + "docstring": "psycopg >= 3 forces the usage of server-side bindings when using named cursors but the ORM doesn't yet support the systematic generation of prepareable SQL (#20516). ClientCursorMixin forces the usage of client-side bindings while ServerCursor implements the logic required to declare and scroll through named cursors. 
Mixing ClientCursorMixin in wouldn't be necessary if Cursor allowed to specify how parameters should be bound instead, which ServerCursor would inherit, but that's not the case.", + "type": "class", + "file_path": "django\\django\\db\\backends\\postgresql\\base.py", + "ast_data": "ClassDef name:ServerSideCursor" + }, + { + "library": "matplotlib", + "name": "plot", + "source_code": "def plot(self, xs, ys, *args, zdir='z', axlim_clip=False, **kwargs):\n had_data = self.has_data()\n if args and (not isinstance(args[0], str)):\n zs, *args = args\n if 'zs' in kwargs:\n raise TypeError(\"plot() for multiple values for argument 'zs'\")\n else:\n zs = kwargs.pop('zs', 0)\n xs, ys, zs = cbook._broadcast_with_masks(xs, ys, zs)\n lines = super().plot(xs, ys, *args, **kwargs)\n for line in lines:\n art3d.line_2d_to_3d(line, zs=zs, zdir=zdir, axlim_clip=axlim_clip)\n xs, ys, zs = art3d.juggle_axes(xs, ys, zs, zdir)\n self.auto_scale_xyz(xs, ys, zs, had_data)\n return lines", + "docstring": "Plot 2D or 3D data. Parameters ---------- xs : 1D array-like x coordinates of vertices. ys : 1D array-like y coordinates of vertices. zs : float or 1D array-like z coordinates of vertices; either one for all points or one for each point. zdir : {'x', 'y', 'z'}, default: 'z' When plotting 2D data, the direction to use as z. axlim_clip : bool, default: False Whether to hide data that is outside the axes view limits. .. versionadded:: 3.10 **kwargs Other arguments are forwarded to .", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:plot arg:self arg:xs arg:ys arguments arg arg arg arg arg arg arg Assign Call If BoolOp Call Assign If Compare Raise Call Assign Call Assign Call Assign Call Call For Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, resource_handle, create_op, name):\n stamp_token, serialized = gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle)\n slice_spec = ''\n specs = [saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec, name + '_stamp'), saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec, name + '_serialized')]\n super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name)\n self.resource_handle = resource_handle\n self._create_op = create_op", + "docstring": "Creates a _TreeEnsembleSavable object. Args: resource_handle: handle to the decision tree ensemble variable. create_op: the op to initialize the variable. name: the name to save the tree ensemble variable under.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\boosted_trees_ops.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:resource_handle arg:create_op arg:name arguments arg arg arg arg Assign Call Assign Assign Call Call Call Call Assign Assign" + }, + { + "library": "pandas", + "name": "_reset_identity", + "source_code": "@final\ndef _reset_identity(self) -> None:\n self._id = object()", + "docstring": "Initializes or resets `` attribute with new object.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_reset_identity arg:self arguments arg Assign Call" + }, + { + "library": "tensorflow", + "name": "_get_applicable_dict", + "source_code": "def _get_applicable_dict(self, transformer_field, full_name, name):\n function_transformers = getattr(self._api_change_spec, transformer_field, {})\n glob_name = '*.' 
+ name if name else None\n transformers = function_transformers.get('*', {}).copy()\n transformers.update(function_transformers.get(glob_name, {}))\n transformers.update(function_transformers.get(full_name, {}))\n return transformers", + "docstring": "Get all dict entries indexed by name that apply to full_name or name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py", + "ast_data": "FunctionDef name:_get_applicable_dict arg:self arg:transformer_field arg:full_name arg:name arguments arg arg arg arg Assign Call Assign Assign Call Call Call Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "_line_for_search", + "source_code": "def _line_for_search(x0, alpha, lower_bound, upper_bound):\n nonzero, = alpha.nonzero()\n lower_bound, upper_bound = (lower_bound[nonzero], upper_bound[nonzero])\n x0, alpha = (x0[nonzero], alpha[nonzero])\n low = (lower_bound - x0) / alpha\n high = (upper_bound - x0) / alpha\n pos = alpha > 0\n lmin_pos = np.where(pos, low, 0)\n lmin_neg = np.where(pos, 0, high)\n lmax_pos = np.where(pos, high, 0)\n lmax_neg = np.where(pos, 0, low)\n lmin = np.max(lmin_pos + lmin_neg)\n lmax = np.min(lmax_pos + lmax_neg)\n return (lmin, lmax) if lmax >= lmin else (0, 0)", + "docstring": "Given a parameter vector ``.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_optimize.py", + "ast_data": "FunctionDef name:_line_for_search arg:x0 arg:alpha arg:lower_bound arg:upper_bound arguments arg arg arg arg Assign Call Assign Assign Assign Assign Assign Compare Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Compare" + }, + { + "library": "numpy", + "name": "_splitlines", + "source_code": "@array_function_dispatch(_splitlines_dispatcher)\ndef _splitlines(a, keepends=None):\n return _vec_string(a, np.object_, 'splitlines', _clean_args(keepends))", + "docstring": "For each element in , return a list of the lines in the element, breaking at line boundaries. Calls :meth: element-wise. Parameters ---------- a : array-like, with `` dtype keepends : bool, optional Line breaks are not included in the resulting list unless keepends is given and true. Returns ------- out : ndarray Array of list objects See Also -------- str.splitlines Examples -------- >>> np.char.splitlines(\"first line\\nsecond line\") array(list(['first line', 'second line']), dtype=object) >>> a = np.array([\"first\\nsecond\", \"third\\nfourth\"]) >>> np.char.splitlines(a) array([list(['first', 'second']), list(['third', 'fourth'])], dtype=object)", + "type": "function", + "file_path": "numpy\\numpy\\_core\\strings.py", + "ast_data": "FunctionDef name:_splitlines arg:a arg:keepends arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "set_rng_state", + "source_code": "def set_rng_state(new_state: torch.Tensor) -> None:\n default_generator.set_state(new_state)", + "docstring": "Sets the random number generator state. .. note:: This function only works for CPU. For CUDA, please use :func:, which works for both CPU and CUDA. 
Args: new_state (torch.ByteTensor): The desired state", + "type": "function", + "file_path": "pytorch\\torch\\random.py", + "ast_data": "FunctionDef name:set_rng_state arg:new_state arguments arg Call" + }, + { + "library": "kornia", + "name": "left_to_right_epipolar_distance", + "source_code": "def left_to_right_epipolar_distance(pts1: Tensor, pts2: Tensor, Fm: Tensor) -> Tensor:\n KORNIA_CHECK_IS_TENSOR(pts1)\n KORNIA_CHECK_IS_TENSOR(pts2)\n KORNIA_CHECK_IS_TENSOR(Fm)\n if len(Fm.shape) < 3 or not Fm.shape[-2:] == (3, 3):\n raise ValueError(f'Fm must be a (*, 3, 3) tensor. Got {Fm.shape}')\n if pts1.shape[-1] == 2:\n pts1 = convert_points_to_homogeneous(pts1)\n F_t: Tensor = Fm.transpose(dim0=-2, dim1=-1)\n line1_in_2: Tensor = pts1 @ F_t\n return point_line_distance(pts2, line1_in_2)", + "docstring": "Return one-sided epipolar distance for correspondences given the fundamental matrix. This method measures the distance from points in the right images to the epilines of the corresponding points in the left images as they reflect in the right images. Args: pts1: correspondences from the left images with shape :math:. If they are not homogeneous, converted automatically. pts2: correspondences from the right images with shape :math:. If they are not homogeneous, converted automatically. Fm: Fundamental matrices with shape :math:. Called Fm to avoid ambiguity with torch.nn.functional. Returns: the computed Symmetrical distance with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\epipolar\\_metrics.py", + "ast_data": "FunctionDef name:left_to_right_epipolar_distance arg:pts1 arg:pts2 arg:Fm arguments arg arg arg Call Call Call If BoolOp Compare Call Compare Raise Call If Compare Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "TfrGen", + "source_code": "class TfrGen(transpiler.GenericTranspiler):\n\n def __init__(self, op_defs):\n self._op_defs = op_defs\n\n def transform_ast(self, node, ctx):\n node = _apply_py_to_tf_passes(node, ctx)\n graphs = cfg.build(node)\n node = qual_names.resolve(node)\n node = activity.resolve(node, ctx)\n node = reaching_definitions.resolve(node, ctx, graphs)\n node = reaching_fndefs.resolve(node, ctx, graphs)\n node = type_inference.resolve(node, ctx, graphs, TFRTypeResolver(self._op_defs))\n mlir_generator = TFRGen(ctx, self._op_defs)\n mlir_generator.visit(node)\n return mlir_generator.code_buffer", + "docstring": "Transforms Python objects into TFR MLIR source code.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py", + "ast_data": "ClassDef name:TfrGen FunctionDef name:__init__ arg:self arg:op_defs arguments arg arg Assign FunctionDef name:transform_ast arg:self arg:node arg:ctx arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "seaborn", + "name": "set_bandwidth", + "source_code": "def set_bandwidth(self, bw_method=None):\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and (not isinstance(bw_method, str)):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 
'silverman', a scalar or a callable.\"\n raise ValueError(msg)\n self._compute_covariance()", + "docstring": "Compute the estimator bandwidth with given method. The new bandwidth calculated after a call to is used for subsequent evaluations of the estimated density. Parameters ---------- bw_method : str, scalar or callable, optional The method used to calculate the estimator bandwidth. This can be 'scott', 'silverman', a scalar constant or a callable. If a scalar, this will be used directly as . If a callable, it should take a instance as only parameter and return a scalar. If None (default), nothing happens; the current method is kept. Notes ----- .. versionadded:: 0.11", + "type": "method", + "file_path": "seaborn\\seaborn\\external\\kde.py", + "ast_data": "FunctionDef name:set_bandwidth arg:self arg:bw_method arguments arg arg If Compare If Compare Assign If Compare Assign If BoolOp Call Call Assign Assign arguments If Call Assign Assign arguments Call Assign Raise Call Call" + }, + { + "library": "pytorch", + "name": "from_onnx_type", + "source_code": "@classmethod\ndef from_onnx_type(cls, onnx_type: int | _C_onnx.TensorProtoDataType | None) -> JitScalarType:\n if onnx_type not in _ONNX_TO_SCALAR_TYPE:\n raise errors.OnnxExporterError(f'Unknown onnx_type: {onnx_type}')\n return _ONNX_TO_SCALAR_TYPE[typing.cast(_C_onnx.TensorProtoDataType, onnx_type)]", + "docstring": "Convert a ONNX data type to JitScalarType. Args: onnx_type: A torch._C._onnx.TensorProtoDataType to create a JitScalarType from Returns: JitScalarType Raises: OnnxExporterError: if dtype is not a valid torch.dtype or if it is None.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_type_utils.py", + "ast_data": "FunctionDef name:from_onnx_type arg:cls arg:onnx_type arguments arg arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "enforce_as_strided_input_layout", + "source_code": "def enforce_as_strided_input_layout(gm: torch.fx.GraphModule):\n as_strided_ops = [torch.ops.aten.as_strided.default, torch.ops.aten.as_strided_.default, torch.ops.aten.as_strided_scatter.default]\n strided_nodes = [n for n in gm.graph.nodes if n.target in as_strided_ops]\n for n in strided_nodes:\n with gm.graph.inserting_before(n):\n ft = n.args[0].meta['val']\n new_node = gm.graph.call_function(prims.inductor_force_stride_order.default, (n.args[0], ft.stride()))\n n.replace_input_with(n.args[0], new_node)\n gm.graph.lint()\n gm.recompile()", + "docstring": "Make sure the as_strided node's input's layout does not change due to compiler optimizations, because the as_strided strides info depends on input tensor stride info.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\freezing.py", + "ast_data": "FunctionDef name:enforce_as_strided_input_layout arg:gm arguments arg Assign Assign Compare For With Call Assign Assign Call Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "mahalanobis", + "source_code": "def mahalanobis(self, X):\n X = validate_data(self, X, reset=False)\n precision = self.get_precision()\n with config_context(assume_finite=True):\n dist = pairwise_distances(X, self.location_[np.newaxis, :], metric='mahalanobis', VI=precision)\n return np.reshape(dist, (len(X),)) ** 2", + "docstring": "Compute the squared Mahalanobis distances of given observations. Parameters ---------- X : array-like of shape (n_samples, n_features) The observations, the Mahalanobis distances of the which we compute. 
Observations are assumed to be drawn from the same distribution than the data used in fit. Returns ------- dist : ndarray of shape (n_samples,) Squared Mahalanobis distances of the observations.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\covariance\\_empirical_covariance.py", + "ast_data": "FunctionDef name:mahalanobis arg:self arg:X arguments arg arg Assign Call Assign Call With Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "parameters", + "source_code": "@property\ndef parameters(self):\n return dict(self._parameters)", + "docstring": "Dictionary of parameters used to instantiate this .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:parameters arg:self arguments arg Return return:yes Call" + }, + { + "library": "seaborn", + "name": "__init__", + "source_code": "def __init__(self, estimator, errorbar=None, **boot_kws):\n if estimator != 'mean':\n raise ValueError(f\"Weighted estimator must be 'mean', not {estimator!r}.\")\n self.estimator = estimator\n method, level = _validate_errorbar_arg(errorbar)\n if method is not None and method != 'ci':\n raise ValueError(f\"Error bar method must be 'ci', not {method!r}.\")\n self.error_method = method\n self.error_level = level\n self.boot_kws = boot_kws", + "docstring": "Data aggregator that produces a weighted estimate and error bar interval. Parameters ---------- estimator : string Function (or method name) that maps a vector to a scalar. Currently supports only \"mean\". errorbar : string or (string, number) tuple Name of errorbar method or a tuple with a method name and a level parameter. Currently the only supported method is \"ci\". boot_kws Additional keywords are passed to bootstrap when error_method is \"ci\".", + "type": "method", + "file_path": "seaborn\\seaborn\\_statistics.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:estimator arg:errorbar arguments arg arg arg arg If Compare Raise Call Assign Assign Call If BoolOp Compare Compare Raise Call Assign Assign Assign" + }, + { + "library": "django", + "name": "check_settings", + "source_code": "def check_settings(base_url=None):\n if base_url is None:\n base_url = settings.STATIC_URL\n if not base_url:\n raise ImproperlyConfigured(\"You're using the staticfiles app without having set the required STATIC_URL setting.\")\n if settings.MEDIA_URL == base_url:\n raise ImproperlyConfigured('The MEDIA_URL and STATIC_URL settings must have different values')\n if settings.DEBUG and settings.MEDIA_URL and settings.STATIC_URL and settings.MEDIA_URL.startswith(settings.STATIC_URL):\n raise ImproperlyConfigured(\"runserver can't serve media if MEDIA_URL is within STATIC_URL.\")\n if (settings.MEDIA_ROOT and settings.STATIC_ROOT) and settings.MEDIA_ROOT == settings.STATIC_ROOT:\n raise ImproperlyConfigured('The MEDIA_ROOT and STATIC_ROOT settings must have different values')", + "docstring": "Check if the staticfiles settings have sane values.", + "type": "function", + "file_path": "django\\django\\contrib\\staticfiles\\utils.py", + "ast_data": "FunctionDef name:check_settings arg:base_url arguments arg If Compare Assign If Raise Call If Compare Raise Call If BoolOp Call Raise Call If BoolOp BoolOp Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "next_layer", + "source_code": "def next_layer(self, original_rp, broadcast_rp):\n gather_index = _next_layer_gather_index(self, original_rp, broadcast_rp)\n return 
_LayerBroadcaster.from_gather_index(gather_index)", + "docstring": "Create the next layer gather_index whether or not a broadcast happens. *---------self------->* | | original_rp broadcast_rp | | \\|/ \\|/ *--next_broadcaster-->* Args: original_rp: the original row partition. broadcast_rp: the target row partition. Returns: the gather_index for next_broadcaster.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:next_layer arg:self arg:original_rp arg:broadcast_rp arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_saver_or_default", + "source_code": "def _get_saver_or_default():\n collection_key = ops.GraphKeys.SAVERS\n savers = ops.get_collection(collection_key)\n if savers:\n if len(savers) > 1:\n raise RuntimeError('More than one item in collection {}. Please indicate which one to use by passing it to the constructor.'.format(collection_key))\n return savers[0]\n saver = Saver(sharded=True, allow_empty=True)\n if saver is not None:\n ops.add_to_collection(collection_key, saver)\n return saver", + "docstring": "Returns the saver from SAVERS collection, or creates a default one. This method is used by other members of the training module, such as , or . Returns: . Raises: RuntimeError: If the SAVERS collection already has more than one items.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", + "ast_data": "FunctionDef name:_get_saver_or_default arguments Assign Assign Call If If Compare Call Raise Call Call Return return:yes Assign Call If Compare Call Return return:yes" + }, + { + "library": "kornia", + "name": "vit_small", + "source_code": "def vit_small(patch_size=16, **kwargs) -> DinoVisionTransformer:\n model = DinoVisionTransformer(patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, block_fn=partial(Block, attn_class=MemEffAttention), **kwargs)\n return model", + "docstring": "Return ViT Small.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\dinov2.py", + "ast_data": "FunctionDef name:vit_small arg:patch_size arguments arg arg Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "variable_op_v2", + "source_code": "def variable_op_v2(shape, dtype, name='Variable', container='', shared_name=''):\n return gen_state_ops.variable_v2(shape=shape, dtype=dtype, name=name, container=container, shared_name=shared_name)", + "docstring": "Create a variable Operation. See also variables.Variable. Args: shape: The shape of the tensor managed by this variable dtype: The underlying type of the tensor values. name: optional name to use for the variable op. container: An optional string. Defaults to \"\". If non-empty, this variable is placed in the given container. Otherwise, a default container is used. shared_name: An optional string. Defaults to \"\". If non-empty, this variable is named in the given bucket with this shared_name. Otherwise, the node name is used instead. 
Returns: A variable tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py", + "ast_data": "FunctionDef name:variable_op_v2 arg:shape arg:dtype arg:name arg:container arg:shared_name arguments arg arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_compute_tensor_usage_count", + "source_code": "def _compute_tensor_usage_count(self):\n tensor_usage_count = collections.Counter()\n available_tensors = set((str(id(tensor)) for tensor in self.inputs))\n depth_keys = list(self._nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n depth_keys = depth_keys[1:]\n for depth in depth_keys:\n for node in self._nodes_by_depth[depth]:\n input_tensors = {str(id(tensor)) for tensor in nest.flatten(node.keras_inputs)}\n if input_tensors.issubset(available_tensors):\n for tensor in nest.flatten(node.keras_inputs):\n tensor_usage_count[str(id(tensor))] += 1\n for output_tensor in nest.flatten(node.outputs):\n available_tensors.add(str(id(output_tensor)))\n for tensor in self.outputs:\n tensor_usage_count[str(id(tensor))] += 1\n self._tensor_usage_count = tensor_usage_count", + "docstring": "Compute the #. of tensor usages for all the output tensors of layers. The computed tensor usage count is saved as . This is later used for saving memory in eager computation by releasing no-longer-needed tensors as early as possible.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py", + "ast_data": "FunctionDef name:_compute_tensor_usage_count arg:self arguments arg Assign Call Assign Call Call Call Assign Call Call Call Assign For For Assign Call Call Call If Call For Call Call Call For Call Call Call Call For Call Call Assign" + }, + { + "library": "django", + "name": "make_style", + "source_code": "def make_style(config_string=''):\n style = Style()\n color_settings = termcolors.parse_color_setting(config_string)\n for role in termcolors.PALETTES[termcolors.NOCOLOR_PALETTE]:\n if color_settings:\n format = color_settings.get(role, {})\n style_func = termcolors.make_style(**format)\n else:\n\n def style_func(x):\n return x\n setattr(style, role, style_func)\n style.ERROR_OUTPUT = style.ERROR\n return style", + "docstring": "Create a Style object from the given config_string. 
If config_string is empty django.utils.termcolors.DEFAULT_PALETTE is used.", + "type": "function", + "file_path": "django\\django\\core\\management\\color.py", + "ast_data": "FunctionDef name:make_style arg:config_string arguments arg Assign Call Assign Call For If Assign Call Assign Call FunctionDef name:style_func arg:x arguments arg Return return:yes Call Assign Return return:yes" + }, + { + "library": "django", + "name": "get_urlconf", + "source_code": "def get_urlconf(default=None):\n return getattr(_urlconfs, 'value', default)", + "docstring": "Return the root URLconf to use for the current thread or asyncio task if it has been changed from the default one.", + "type": "function", + "file_path": "django\\django\\urls\\base.py", + "ast_data": "FunctionDef name:get_urlconf arg:default arguments arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "ljust", + "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_just_dispatcher)\ndef ljust(a, width, fillchar=' '):\n width = np.asanyarray(width)\n if not np.issubdtype(width.dtype, np.integer):\n raise TypeError(f\"unsupported type {width.dtype} for operand 'width'\")\n a = np.asanyarray(a)\n fillchar = np.asanyarray(fillchar)\n if np.any(str_len(fillchar) != 1):\n raise TypeError('The fill character must be exactly one character long')\n if np.result_type(a, fillchar).char == 'T':\n return _ljust(a, width, fillchar)\n fillchar = fillchar.astype(a.dtype, copy=False)\n width = np.maximum(str_len(a), width)\n shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape)\n out_dtype = f'{a.dtype.char}{width.max()}'\n out = np.empty_like(a, shape=shape, dtype=out_dtype)\n return _ljust(a, width, fillchar, out=out)", + "docstring": "Return an array with the elements of left-justified in a string of length . 
Parameters ---------- a : array-like, with ``width >> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.ljust(c, width=3) array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.ljust(c, width=9) array(['aAaAaA ', ' aA ', 'abBABba '], dtype=' Iterator[Sequence[str]]:\n yield from self._gen_dtypes()", + "docstring": "Iterator with string representation of body data without counts.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:_gen_rows_without_counts arg:self arguments arg Call" + }, + { + "library": "django", + "name": "apply", + "source_code": "def apply(self, project_state, schema_editor, collect_sql=False):\n for operation in self.operations:\n if collect_sql:\n schema_editor.collected_sql.append('--')\n schema_editor.collected_sql.append('-- %s' % operation.describe())\n schema_editor.collected_sql.append('--')\n if not operation.reduces_to_sql:\n schema_editor.collected_sql.append('-- THIS OPERATION CANNOT BE WRITTEN AS SQL')\n continue\n collected_sql_before = len(schema_editor.collected_sql)\n old_state = project_state.clone()\n operation.state_forwards(self.app_label, project_state)\n atomic_operation = operation.atomic or (self.atomic and operation.atomic is not False)\n if not schema_editor.atomic_migration and atomic_operation:\n with atomic(schema_editor.connection.alias):\n operation.database_forwards(self.app_label, schema_editor, old_state, project_state)\n else:\n operation.database_forwards(self.app_label, schema_editor, old_state, project_state)\n if collect_sql and collected_sql_before == len(schema_editor.collected_sql):\n schema_editor.collected_sql.append('-- (no-op)')\n return project_state", + "docstring": "Take a project_state representing all migrations prior to this one and a schema_editor for a live database and apply the migration in a forwards order. Return the resulting project state for efficient reuse by following Migrations.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\migration.py", + "ast_data": "FunctionDef name:apply arg:self arg:project_state arg:schema_editor arg:collect_sql arguments arg arg arg arg For If Call Call Call Call If Call Assign Call Assign Call Call Assign BoolOp BoolOp Compare If BoolOp With Call Call Call If BoolOp Compare Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_default_dtype_for", + "source_code": "def get_default_dtype_for(dtype):\n if dtype == torch.bool:\n return dtype\n if dtype.is_complex:\n return default_dtypes().complex_dtype\n if dtype.is_floating_point:\n return default_dtypes().float_dtype\n return default_dtypes().int_dtype", + "docstring": "Default scalar type given sctype category.", + "type": "function", + "file_path": "pytorch\\torch\\_numpy\\_dtypes_impl.py", + "ast_data": "FunctionDef name:get_default_dtype_for arg:dtype arguments arg If Compare Return return:yes If Return return:yes Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "major", + "source_code": "@property\ndef major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0", + "docstring": "The first item of :attr: or `` if unavailable. 
>>> Version(\"1.2.3\").major 1", + "type": "method", + "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py", + "ast_data": "FunctionDef name:major arg:self arguments arg Return return:yes Compare Call" + }, + { + "library": "pytorch", + "name": "FSDPStateContext", + "source_code": "class FSDPStateContext:\n\n def __init__(self) -> None:\n self.all_states: list[FSDPState] = []\n self.iter_forward_root: Optional[FSDPState] = None\n self.post_backward_final_callback_queued: bool = False\n self.is_last_backward: bool = True\n self.post_optim_event: Optional[torch.Event] = None", + "docstring": "This has state shared across FSDP states.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_state.py", + "ast_data": "ClassDef name:FSDPStateContext FunctionDef name:__init__ arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "_runtime_zero_iterations_errmsg", + "source_code": "def _runtime_zero_iterations_errmsg(symbol_names, nulls, init_vars):\n var_names = []\n for sn, n, v in zip(symbol_names, nulls, init_vars):\n if not n:\n continue\n if isinstance(v, variables.UndefinedReturnValue):\n var_names.append('the function return value')\n else:\n var_names.append(sn)\n var_names = ', '.join(var_names)\n return 'loop must iterate at least once to initialize {}'.format(var_names)", + "docstring": "Creates an error message asking for the loop to iterate at least once.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py", + "ast_data": "FunctionDef name:_runtime_zero_iterations_errmsg arg:symbol_names arg:nulls arg:init_vars arguments arg arg arg Assign For Call If If Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "register_unary_elementwise_api", + "source_code": "def register_unary_elementwise_api(func):\n _UNARY_ELEMENTWISE_APIS.append(func)\n for args, handler in _ELEMENTWISE_API_HANDLERS.items():\n if len(args) == 1:\n _add_dispatch_for_unary_elementwise_api(func, args[0], handler)\n return func", + "docstring": "Decorator that registers a TensorFlow op as a unary elementwise API.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", + "ast_data": "FunctionDef name:register_unary_elementwise_api arg:func arguments arg Call For Call If Compare Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "draw_if_interactive", + "source_code": "def draw_if_interactive(*args, **kwargs):\n return _get_backend_mod().draw_if_interactive(*args, **kwargs)", + "docstring": "Redraw the current figure if in interactive mode. .. warning:: End users will typically not have to call this function because the the interactive mode takes care of this.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:draw_if_interactive arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "erase", + "source_code": "def erase(self, batch_idx: torch.Tensor) -> None:\n allocated_page_idx = self.page_table[batch_idx] != -1\n allocated_pages = self.page_table[batch_idx][allocated_page_idx]\n self.capacity[batch_idx] = 0\n self.empty_pages += allocated_pages.tolist()\n self.physical_to_logical[batch_idx][:, allocated_pages] = -1\n self.page_table[batch_idx] = -1", + "docstring": "Removes a single batch from paged attention. 
Args: batch_idx (Tensor): batch index to be removed; shape :math:.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\attention\\experimental\\_paged_attention.py", + "ast_data": "FunctionDef name:erase arg:self arg:batch_idx arguments arg arg Assign Compare Assign Assign Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "flush", + "source_code": "@tf_export('summary.flush', v1=[])\ndef flush(writer=None, name=None):\n del name\n if writer is None:\n writer = _summary_state.writer\n if writer is None:\n return control_flow_ops.no_op()\n if isinstance(writer, SummaryWriter):\n return writer.flush()\n raise ValueError('Invalid argument to flush(): %r' % (writer,))", + "docstring": "Forces summary writer to send any buffered data to storage. This operation blocks until that finishes. Args: writer: The to flush. If None, the current default writer will be used instead; if there is no current writer, this returns . name: Ignored legacy argument for a name for the operation. Returns: The created .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:flush arg:writer arg:name arguments arg arg If Compare Assign If Compare Return return:yes Call If Call Return return:yes Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "check_device_type", + "source_code": "@staticmethod\ndef check_device_type(device_type):\n if device_type not in (_DEVICE_TYPE_TPU, _DEVICE_TYPE_CPU):\n raise ValueError('Invalid device_type \"%s\"' % device_type)", + "docstring": "Checks if the given device type is valid.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:check_device_type arg:device_type arguments arg If Compare Raise Call" + }, + { + "library": "authlib", + "name": "validate_client_uri", + "source_code": "def validate_client_uri(self):\n self._validate_uri('client_uri')", + "docstring": "URL string of a web page providing information about the client. If present, the server SHOULD display this URL to the end-user in a clickable fashion. It is RECOMMENDED that clients always send this field. The value of this field MUST point to a valid web page. The value of this field MAY be internationalized, as described in Section 2.2.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py", + "ast_data": "FunctionDef name:validate_client_uri arg:self arguments arg Call" + }, + { + "library": "tensorflow", + "name": "get_flat_tensor_specs", + "source_code": "def get_flat_tensor_specs(element_spec):\n return list(itertools.chain.from_iterable((spec._flat_tensor_specs for spec in nest.flatten(element_spec))))", + "docstring": "Returns a list s for the element tensor representation. Args: element_spec: A nested structure of objects representing to element type specification. 
Returns: A list s for the element tensor representation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\util\\structure.py", + "ast_data": "FunctionDef name:get_flat_tensor_specs arg:element_spec arguments arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "seed_all", + "source_code": "def seed_all() -> None:\n\n def cb():\n random_seed = 0\n seeded = False\n for i in range(device_count()):\n default_generator = torch.cuda.default_generators[i]\n if not seeded:\n default_generator.seed()\n random_seed = default_generator.initial_seed()\n seeded = True\n else:\n default_generator.manual_seed(random_seed)\n _lazy_call(cb)", + "docstring": "Set the seed for generating random numbers to a random number on all GPUs. It's safe to call this function if CUDA is not available; in that case, it is silently ignored.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\random.py", + "ast_data": "FunctionDef name:seed_all arguments FunctionDef name:cb arguments Assign Assign For Call Call Assign If Call Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_values", + "source_code": "@property\ndef _values(self):\n ordered = list(zip(*sorted(self.items(), key=lambda it: it[0])))\n if ordered:\n return ordered[1]\n return []", + "docstring": "Collect values for TrackableDataStructure.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py", + "ast_data": "FunctionDef name:_values arg:self arguments arg Assign Call Call Call Call arguments arg If Return return:yes Return return:no" + }, + { + "library": "scipy", + "name": "rvs", + "source_code": "def rvs(self, loc=None, shape=1, df=1, size=1, random_state=None):\n dim, loc, shape, df = self._process_parameters(loc, shape, df)\n if random_state is not None:\n rng = check_random_state(random_state)\n else:\n rng = self._random_state\n if np.isinf(df):\n x = np.ones(size)\n else:\n x = rng.chisquare(df, size=size) / df\n z = rng.multivariate_normal(np.zeros(dim), shape, size=size)\n samples = loc + z / np.sqrt(x)[..., None]\n return _squeeze_output(samples)", + "docstring": "Draw random samples from a multivariate t-distribution. Parameters ---------- %(_mvt_doc_default_callparams)s size : integer, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (, ), where is the dimension of the random variable. Examples -------- >>> from scipy.stats import multivariate_t >>> x = [0.4, 5] >>> loc = [0, 1] >>> shape = [[1, 0.1], [0.1, 1]] >>> df = 7 >>> multivariate_t.rvs(loc, shape, df) array([[0.93477495, 3.00408716]])", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:rvs arg:self arg:loc arg:shape arg:df arg:size arg:random_state arguments arg arg arg arg arg arg Assign Call If Compare Assign Call Assign If Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "xdivy", + "source_code": "@tf_export('math.xdivy')\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\ndef xdivy(x, y, name=None):\n with ops.name_scope(name, 'xdivy', [x]):\n return gen_math_ops.xdivy(x, y)", + "docstring": "Computes . Given and , computes . This function safely returns zero when , no matter what the value of is. Example: >>> tf.math.xdivy(1., 2.) >>> tf.math.xdivy(0., 1.) >>> tf.math.xdivy(0., 0.) 
>>> tf.math.xdivy(1., 0.) Args: x: A of type , , , , y: A of type , , , , name: A name for the operation (optional). Returns: .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:xdivy arg:x arg:y arg:name arguments arg arg arg With Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "read_table", + "source_code": "def read_table(self, table_name: str, index_col: str | list[str] | None=None, coerce_float: bool=True, parse_dates=None, columns=None, schema: str | None=None, chunksize: int | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame | Iterator[DataFrame]:\n if coerce_float is not True:\n raise NotImplementedError(\"'coerce_float' is not implemented for ADBC drivers\")\n if chunksize:\n raise NotImplementedError(\"'chunksize' is not implemented for ADBC drivers\")\n if columns:\n if index_col:\n index_select = maybe_make_list(index_col)\n else:\n index_select = []\n to_select = index_select + columns\n select_list = ', '.join((f'\"{x}\"' for x in to_select))\n else:\n select_list = '*'\n if schema:\n stmt = f'SELECT {select_list} FROM {schema}.{table_name}'\n else:\n stmt = f'SELECT {select_list} FROM {table_name}'\n with self.execute(stmt) as cur:\n pa_table = cur.fetch_arrow_table()\n df = arrow_table_to_pandas(pa_table, dtype_backend=dtype_backend)\n return _wrap_result_adbc(df, index_col=index_col, parse_dates=parse_dates)", + "docstring": "Read SQL database table into a DataFrame. Parameters ---------- table_name : str Name of SQL table in database. coerce_float : bool, default True Raises NotImplementedError parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of `pandas.to_datetimeDataFrameDataFrameArrowDtypeDataFrame` .. versionadded:: 2.0 Returns ------- DataFrame See Also -------- pandas.read_sql_table SQLDatabase.read_query", + "type": "method", + "file_path": "pandas\\pandas\\io\\sql.py", + "ast_data": "FunctionDef name:read_table arg:self arg:table_name arg:index_col arg:coerce_float arg:parse_dates arg:columns arg:schema arg:chunksize arg:dtype_backend arguments arg arg arg arg arg arg arg arg arg If Compare Raise Call If Raise Call If If Assign Call Assign Assign Assign Call Assign If Assign Assign With Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "create_script_class", + "source_code": "def create_script_class(obj):\n qualified_class_name = _jit_internal._qualified_name(type(obj))\n rcb = _jit_internal.createResolutionCallbackForClassMethods(type(obj))\n _compile_and_register_class(type(obj), rcb, qualified_class_name)\n class_ty = _python_cu.get_class(qualified_class_name)\n cpp_object = torch._C._create_object_with_type(class_ty)\n for name, value in obj.__dict__.items():\n cpp_object.setattr(name, value)\n return wrap_cpp_class(cpp_object)", + "docstring": "Create and return a RecursiveScriptClass instance from a Python object. 
Arguments: obj: A Python object.", + "type": "function", + "file_path": "pytorch\\torch\\jit\\_recursive.py", + "ast_data": "FunctionDef name:create_script_class arg:obj arguments arg Assign Call Call Assign Call Call Call Call Assign Call Assign Call For Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_maybe_add_module_deprecation_warning", + "source_code": "def _maybe_add_module_deprecation_warning(self, node, full_name, whole_name):\n warnings = self._api_change_spec.module_deprecations\n if full_name in warnings:\n level, message = warnings[full_name]\n message = message.replace('', whole_name)\n self.add_log(level, node.lineno, node.col_offset, 'Using member %s in deprecated module %s. %s' % (whole_name, full_name, message))\n return True\n else:\n return False", + "docstring": "Adds a warning if full_name is a deprecated module.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py", + "ast_data": "FunctionDef name:_maybe_add_module_deprecation_warning arg:self arg:node arg:full_name arg:whole_name arguments arg arg arg arg Assign If Compare Assign Assign Call Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_tmp_dir", + "source_code": "@classmethod\ndef _get_tmp_dir(cls: type[AOTAutogradCache]) -> str:\n return os.path.join(cache_dir(), 'aotautograd')", + "docstring": "Get the toplevel temporary directory for storing compiled graphs.", + "type": "method", + "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py", + "ast_data": "FunctionDef name:_get_tmp_dir arg:cls arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "CommBufferLayout", + "source_code": "class CommBufferLayout(FixedLayout):\n comm_buffer_type: CommBufferType\n group_name: str\n\n def __init__(self, layout: FlexibleLayout, comm_buffer_type: CommBufferType, group_name: str):\n if not isinstance(layout, FlexibleLayout):\n raise AssertionError(f'A `CommBufferLayout` can only be initialized with a `FlexibleLayout` (got {layout}).')\n fixed = layout.as_fixed()\n super().__init__(device=fixed.device, dtype=fixed.dtype, size=fixed.size, stride=fixed.stride, offset=fixed.offset)\n self.comm_buffer_type = comm_buffer_type\n self.group_name = group_name", + "docstring": "A layout that signifies the buffer is a comm buffer. In terms of striding, the layout is identical to . Buffers with this layout do not participate in in-place reuse - it can be neither the source nor the target for in-place reuse. 
For detailed motivation and usage of this layout, see NOTE [lowering-time collective optimization].", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "ClassDef name:CommBufferLayout FunctionDef name:__init__ arg:self arg:layout arg:comm_buffer_type arg:group_name arguments arg arg arg arg If Call Raise Call Assign Call Call Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "cast", + "source_code": "@doc_controls.do_not_doc_inheritable\ndef cast(self, value, cast_context):\n return super().cast(value, cast_context)", + "docstring": "See tf.types.experimental.TraceType base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py", + "ast_data": "FunctionDef name:cast arg:self arg:value arg:cast_context arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "render", + "source_code": "def render(self, context):\n with context.render_context.push_state(self):\n if context.template is None:\n with context.bind_template(self):\n context.template_name = self.name\n return self._render(context)\n else:\n return self._render(context)", + "docstring": "Display stage -- can be called many times", + "type": "method", + "file_path": "django\\django\\template\\base.py", + "ast_data": "FunctionDef name:render arg:self arg:context arguments arg arg With Call If Compare With Call Assign Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "weights", + "source_code": "@property\ndef weights(self):\n return self.variables", + "docstring": "List of weights/variables created by the Template.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py", + "ast_data": "FunctionDef name:weights arg:self arguments arg Return return:yes" + }, + { + "library": "scrapy", + "name": "send_data", + "source_code": "def send_data(self) -> None:\n if self.metadata['stream_closed_local']:\n raise StreamClosedError(self.stream_id)\n window_size = self._protocol.conn.local_flow_control_window(stream_id=self.stream_id)\n max_frame_size = self._protocol.conn.max_outbound_frame_size\n bytes_to_send_size = min(window_size, self.metadata['remaining_content_length'])\n while bytes_to_send_size > 0:\n chunk_size = min(bytes_to_send_size, max_frame_size)\n data_chunk_start_id = self.metadata['request_content_length'] - self.metadata['remaining_content_length']\n data_chunk = self._request.body[data_chunk_start_id:data_chunk_start_id + chunk_size]\n self._protocol.conn.send_data(self.stream_id, data_chunk, end_stream=False)\n bytes_to_send_size -= chunk_size\n self.metadata['remaining_content_length'] -= chunk_size\n self.metadata['remaining_content_length'] = max(0, self.metadata['remaining_content_length'])\n if self.metadata['remaining_content_length'] == 0:\n self._protocol.conn.end_stream(self.stream_id)", + "docstring": "Called immediately after the headers are sent. Here we send all the data as part of the request. If the content length is 0 initially then we end the stream immediately and wait for response data. Warning: Only call this method when stream not closed from client side and has initiated request already by sending HEADER frame. 
If not then stream will raise ProtocolError (raise by h2 state machine).", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\http2\\stream.py", + "ast_data": "FunctionDef name:send_data arg:self arguments arg If Raise Call Assign Call Assign Assign Call While Compare Assign Call Assign Assign Call Assign Call If Compare Call" + }, + { + "library": "pytorch", + "name": "replace_with_zeros", + "source_code": "def replace_with_zeros(num):\n if not isinstance(num, int):\n raise ValueError('Input must be an integer')\n digits_to_remove = len(str(abs(num))) - 4\n if digits_to_remove > 0:\n modified_num = num // 10 ** digits_to_remove * 10 ** digits_to_remove\n else:\n modified_num = num\n return modified_num", + "docstring": "Keeps the first three digits of an integer and replaces the rest with zeros. Args: num (int): The number to modify. Returns: int: The modified number. Raises: ValueError: If the input is not an integer.", + "type": "function", + "file_path": "pytorch\\benchmarks\\dynamo\\pr_time_benchmarks\\check_results.py", + "ast_data": "FunctionDef name:replace_with_zeros arg:num arguments arg If Call Raise Call Assign Call Call Call If Compare Assign Assign Return return:yes" + }, + { + "library": "kornia", + "name": "visualize", + "source_code": "def visualize(self, images: Union[Tensor, list[Tensor]], semantic_masks: Optional[Union[Tensor, list[Tensor]]]=None, output_type: str='torch', colormap: str='random', manual_seed: int=2147) -> Union[Tensor, list[Tensor], list[Image.Image]]:\n if semantic_masks is None:\n semantic_masks = self.forward(images)\n outputs: Union[Tensor, list[Tensor]]\n if isinstance(semantic_masks, (list, tuple)):\n outputs = []\n for semantic_mask in semantic_masks:\n if semantic_mask.ndim != 3:\n raise ValueError(f'Semantic mask must be of shape (C, H, W), got {semantic_mask.shape}.')\n colors = self.get_colormap(semantic_mask.size(0), colormap, manual_seed=manual_seed)\n outputs.append(self.visualize_output(semantic_mask, colors))\n else:\n colors = self.get_colormap(semantic_masks.size(1), colormap, manual_seed=manual_seed)\n outputs = self.visualize_output(semantic_masks, colors)\n return self._tensor_to_type(outputs, output_type, is_batch=True if isinstance(outputs, Tensor) else False)", + "docstring": "Visualize the segmentation masks. Args: images: If list of RGB images. Each image is a Tensor with shape :math:. If Tensor, a Tensor with shape :math:. semantic_masks: If list of segmentation masks. Each mask is a Tensor with shape :math:. If Tensor, a Tensor with shape :math:. output_type: The type of output, can be \"torch\" or \"PIL\". colormap: The colormap to use, can be \"random\" or a custom color map. manual_seed: The manual seed to use for the colormap.", + "type": "method", + "file_path": "kornia\\kornia\\models\\segmentation\\base.py", + "ast_data": "FunctionDef name:visualize arg:self arg:images arg:semantic_masks arg:output_type arg:colormap arg:manual_seed arguments arg arg arg arg arg arg If Compare Assign Call If Call Assign For If Compare Raise Call Assign Call Call Call Call Assign Call Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "selu", + "source_code": "@dispatch.add_dispatch_support\ndef selu(x):\n return nn.selu(x)", + "docstring": "Scaled Exponential Linear Unit (SELU). 
The Scaled Exponential Linear Unit (SELU) activation function is defined as: - - tf.keras.activations.elualphascaletf.keras.initializers.LecunNormalscale * elu(x, alpha)tf.keras.initializers.LecunNormaltf.keras.layers.AlphaDropout` (not regular dropout). References: - [Klambauer et al., 2017](", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py", + "ast_data": "FunctionDef name:selu arg:x arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_pairwise_callable", + "source_code": "def _pairwise_callable(X, Y, metric, ensure_all_finite=True, **kwds):\n X, Y = check_pairwise_arrays(X, Y, dtype=None, ensure_all_finite=ensure_all_finite, ensure_2d=False)\n if X is Y:\n out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')\n iterator = itertools.combinations(range(X.shape[0]), 2)\n for i, j in iterator:\n x = X[[i], :] if issparse(X) else X[i]\n y = Y[[j], :] if issparse(Y) else Y[j]\n out[i, j] = metric(x, y, **kwds)\n out = out + out.T\n for i in range(X.shape[0]):\n x = X[[i], :] if issparse(X) else X[i]\n out[i, i] = metric(x, x, **kwds)\n else:\n out = np.empty((X.shape[0], Y.shape[0]), dtype='float')\n iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))\n for i, j in iterator:\n x = X[[i], :] if issparse(X) else X[i]\n y = Y[[j], :] if issparse(Y) else Y[j]\n out[i, j] = metric(x, y, **kwds)\n return out", + "docstring": "Handle the callable case for pairwise_{distances,kernels}.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py", + "ast_data": "FunctionDef name:_pairwise_callable arg:X arg:Y arg:metric arg:ensure_all_finite arguments arg arg arg arg arg Assign Call If Compare Assign Call Assign Call Call For Assign Call Assign Call Assign Call Assign For Call Assign Call Assign Call Assign Call Assign Call Call Call For Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "ModelVersionSelector", + "source_code": "class ModelVersionSelector(object):\n\n def __new__(cls, *args, **kwargs):\n use_v2 = should_use_v2()\n cls = swap_class(cls, training.Model, training_v1.Model, use_v2)\n return super(ModelVersionSelector, cls).__new__(cls)", + "docstring": "Chooses between Keras v1 and v2 Model class.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\version_utils.py", + "ast_data": "ClassDef name:ModelVersionSelector FunctionDef name:__new__ arg:cls arguments arg arg arg Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "get_flags_f90", + "source_code": "def get_flags_f90(self):\n return self._get_command_flags('compiler_f90')", + "docstring": "List of Fortran 90 specific flags.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py", + "ast_data": "FunctionDef name:get_flags_f90 arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "make_contiguous_strides_for", + "source_code": "def make_contiguous_strides_for(shape: ShapeType, row_major: bool=True) -> tuple[Union[_IntLikeT, int], ...]:\n validate_shape(shape)\n if not shape:\n return ()\n from torch.fx.experimental.symbolic_shapes import is_nested_int\n multiplier: Union[_IntLikeT, int] = 1\n strides = []\n for l in reversed(shape):\n strides.append(multiplier)\n multiplier *= l if is_nested_int(l) else sym_max(l, 1)\n result = tuple(reversed(strides))\n if row_major:\n return result\n else:\n if len(shape) < 2:\n return result\n 
return result[:-2] + (1, max(shape[-2], 1))", + "docstring": "Returns the strides of a contiguous tensor if row_major If row_major=True, it returns the strides of a contiguous batch of Fortran-contiguous matrices This is often used when calling external libraries like BLAS/LAPACK/cuSolver...", + "type": "function", + "file_path": "pytorch\\torch\\_prims_common\\__init__.py", + "ast_data": "FunctionDef name:make_contiguous_strides_for arg:shape arg:row_major arguments arg arg Call If Return return:no Assign For Call Call Call Call Assign Call Call If Return return:yes If Compare Call Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, name):\n self._name = name\n self._registry = {}", + "docstring": "Creates a new registry.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\registry.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:name arguments arg arg Assign Assign" + }, + { + "library": "tensorflow", + "name": "peek_traceable_objs", + "source_code": "def peek_traceable_objs(self) -> Iterator[TraceableObject[T]]:\n return reversed(self._stack)", + "docstring": "Return iterator over stored TraceableObjects ordered newest to oldest.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\traceable_stack.py", + "ast_data": "FunctionDef name:peek_traceable_objs arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "UnaryDataset", + "source_code": "class UnaryDataset(DatasetV2):\n\n def __init__(self, input_dataset: DatasetV2, variant_tensor):\n self._input_dataset = input_dataset\n super(UnaryDataset, self).__init__(variant_tensor)\n\n def _inputs(self):\n return [self._input_dataset]", + "docstring": "Abstract class representing a dataset with one input.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "ClassDef name:UnaryDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:variant_tensor arguments arg arg arg Assign Call Call FunctionDef name:_inputs arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "reset_min_max_vals", + "source_code": "@torch.jit.export\ndef reset_min_max_vals(self):\n self.min_val = torch.rand(0)\n self.max_val = torch.rand(0)", + "docstring": "Resets the min/max values.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\observer.py", + "ast_data": "FunctionDef name:reset_min_max_vals arg:self arguments arg Assign Call Assign Call" + }, + { + "library": "django", + "name": "add_error", + "source_code": "def add_error(self, field, error):\n if not isinstance(error, ValidationError):\n error = ValidationError(error)\n if hasattr(error, 'error_dict'):\n if field is not None:\n raise TypeError('The argument `field` must be `None` when the `error` argument contains errors for multiple fields.')\n else:\n error = error.error_dict\n else:\n error = {field or NON_FIELD_ERRORS: error.error_list}\n for field, error_list in error.items():\n if field not in self.errors:\n if field != NON_FIELD_ERRORS and field not in self.fields:\n raise ValueError(\"'%s' has no field named '%s'.\" % (self.__class__.__name__, field))\n if field == NON_FIELD_ERRORS:\n self._errors[field] = self.error_class(error_class='nonfield', renderer=self.renderer)\n else:\n self._errors[field] = self.error_class(renderer=self.renderer, field_id=self[field].auto_id)\n 
self._errors[field].extend(error_list)\n if field in self.cleaned_data:\n del self.cleaned_data[field]", + "docstring": "Update the content of . The argument is the name of the field to which the errors should be added. If it's None, treat the errors as NON_FIELD_ERRORS. The argument can be a single error, a list of errors, or a dictionary that maps field names to lists of errors. An \"error\" can be either a simple string or an instance of ValidationError with its message attribute set and a \"list or dictionary\" can be an actual or or an instance of ValidationError with its or attribute set. If is a dictionary, the argument *must* be None and errors will be added to the fields that correspond to the keys of the dictionary.", + "type": "method", + "file_path": "django\\django\\forms\\forms.py", + "ast_data": "FunctionDef name:add_error arg:self arg:field arg:error arguments arg arg arg If Call Assign Call If Call If Compare Raise Call Assign Assign BoolOp For Call If Compare If BoolOp Compare Compare Raise Call If Compare Assign Call Assign Call Call If Compare" + }, + { + "library": "matplotlib", + "name": "_check_unsampled_image", + "source_code": "def _check_unsampled_image(self):\n return False", + "docstring": "Return False. Do not use unsampled image.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:_check_unsampled_image arg:self arguments arg Return return:yes" + }, + { + "library": "sphinx", + "name": "isroutine", + "source_code": "def isroutine(obj: Any) -> TypeIs[_RoutineType]:\n return inspect.isroutine(unpartial(obj))", + "docstring": "Check if the object is a kind of function or method. Partial objects are unwrapped before checking them. .. seealso:: :external+python:func:", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\inspect.py", + "ast_data": "FunctionDef name:isroutine arg:obj arguments arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "_get_machar", + "source_code": "def _get_machar(ftype):\n params = _MACHAR_PARAMS.get(ftype)\n if params is None:\n raise ValueError(repr(ftype))\n key = ftype(-1.0) / ftype(10.0)\n key = key.view(key.dtype.newbyteorder('<')).tobytes()\n ma_like = None\n if ftype == ntypes.longdouble:\n ma_like = _KNOWN_TYPES.get(key[:10])\n if ma_like is None:\n ma_like = _KNOWN_TYPES.get(key)\n if ma_like is None and len(key) == 16:\n _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16}\n ma_like = _kt.get(key[:10])\n if ma_like is not None:\n return ma_like\n warnings.warn(f'Signature {key} for {ftype} does not match any known type: falling back to type probe function.\\nThis warnings indicates broken support for the dtype!', UserWarning, stacklevel=2)\n return _discovered_machar(ftype)", + "docstring": "Get MachAr instance or MachAr-like instance Get parameters for floating point type, by first trying signatures of various known floating point types, then, if none match, attempting to identify parameters by analysis. Parameters ---------- ftype : class Numpy floating point type class (e.g. `MachArMachArLikeftype`. 
Warns ----- UserWarning If the binary signature of the float type is not in the dictionary of known float types.", + "type": "function", + "file_path": "numpy\\numpy\\_core\\getlimits.py", + "ast_data": "FunctionDef name:_get_machar arg:ftype arguments arg Assign Call If Compare Raise Call Call Assign Call Call Assign Call Call Call Assign If Compare Assign Call If Compare Assign Call If BoolOp Compare Compare Call Assign Call Compare Call Assign Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "sdpa_kernel", + "source_code": "@contextlib.contextmanager\ndef sdpa_kernel(backends: Union[list[SDPBackend], SDPBackend], set_priority: bool=False):\n assert isinstance(backends, (list, SDPBackend)), 'Backend must be an instance of SDPBackend or a list of SDPBackend instances'\n if isinstance(backends, SDPBackend):\n backends = [backends]\n backends = list(dict.fromkeys(backends))\n previous_backends = _cur_sdpa_kernel_backends(with_priority=set_priority)\n try:\n _sdpa_kernel(backends, set_priority)\n yield {}\n finally:\n _sdpa_kernel(previous_backends, set_priority)", + "docstring": "Context manager to select which backend to use for scaled dot product attention. .. warning:: This function is beta and subject to change. Args: backends (Union[List[SDPBackend], SDPBackend]): A backend or list of backends for scaled dot product attention. set_priority_order (bool=False): Whether the ordering of the backends is interpreted as their priority order. Example: .. code-block:: python from torch.nn.functional import scaled_dot_product_attention from torch.nn.attention import SDPBackend, sdpa_kernel # Only enable flash attention backend with sdpa_kernel(SDPBackend.FLASH_ATTENTION): scaled_dot_product_attention(...) # Enable the Math or Efficient attention backends with sdpa_kernel([SDPBackend.MATH, SDPBackend.EFFICIENT_ATTENTION]): scaled_dot_product_attention(...) This context manager can be used to select which backend to use for scaled dot product attention. Upon exiting the context manager, the previous state of the flags will be restored, enabling all backends.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\attention\\__init__.py", + "ast_data": "FunctionDef name:sdpa_kernel arg:backends arg:set_priority arguments arg arg Call If Call Assign Assign Call Call Assign Call Try Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, sess_creator):\n self._sess_creator = sess_creator\n _WrappedSession.__init__(self, self._create_session())", + "docstring": "Create a new . The value returned by calling will be the session wrapped by this recoverable session. 
Args: sess_creator: A 'SessionCreator' to be wrapped by recoverable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:sess_creator arguments arg arg Assign Call Call" + }, + { + "library": "pytorch", + "name": "get_unique_attr_name_in_module", + "source_code": "def get_unique_attr_name_in_module(mod_traced: torch.fx.GraphModule, name: str) -> str:\n name = re.sub('[^0-9a-zA-Z_]+', '_', name)\n if name[0].isdigit():\n name = f'_{name}'\n while hasattr(mod_traced, name):\n match = re.match('(.*)_(\\\\d+)$', name)\n if match is None:\n name = name + '_1'\n else:\n base, num = match.group(1, 2)\n name = f'{base}_{int(num) + 1}'\n return name", + "docstring": "Make sure the name is unique (in a module) and can represents an attr.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\const_fold.py", + "ast_data": "FunctionDef name:get_unique_attr_name_in_module arg:mod_traced arg:name arguments arg arg Assign Call If Call Assign While Call Assign Call If Compare Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "num_fields", + "source_code": "@property\ndef num_fields(self):\n return capi.get_field_count(self._ldefn)", + "docstring": "Return the number of fields in the Layer.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py", + "ast_data": "FunctionDef name:num_fields arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_default_compare_output_module_list", + "source_code": "def get_default_compare_output_module_list() -> set[Callable]:\n NUMERIC_SUITE_COMPARE_MODEL_OUTPUT_MODULE_LIST = set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.values()) | set(DEFAULT_QAT_MODULE_MAPPINGS.values()) | set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.values()) | set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.keys()) | set(DEFAULT_QAT_MODULE_MAPPINGS.keys()) | set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.keys()) | _INCLUDE_QCONFIG_PROPAGATE_LIST\n return copy.deepcopy(NUMERIC_SUITE_COMPARE_MODEL_OUTPUT_MODULE_LIST)", + "docstring": "Get list of module class types that we will record output in numeric suite", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py", + "ast_data": "FunctionDef name:get_default_compare_output_module_list arguments Assign Call Call Call Call Call Call Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "pygame", + "name": "get_top_sprite", + "source_code": "def get_top_sprite(self):\n return self._spritelist[-1]", + "docstring": "return the topmost sprite LayeredUpdates.get_top_sprite(): return Sprite", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:get_top_sprite arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_py_list_append", + "source_code": "def _py_list_append(list_, x):\n list_.append(x)\n return list_", + "docstring": "Overload of list_append that executes a Python list append.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py", + "ast_data": "FunctionDef name:_py_list_append arg:list_ arg:x arguments arg arg Call Return return:yes" + }, + { + "library": "seaborn", + "name": "_get_axes", + "source_code": "def _get_axes(self, sub_vars):\n row = sub_vars.get('row', None)\n col = sub_vars.get('col', None)\n if row is not None and col is not 
None:\n return self.facets.axes_dict[row, col]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax", + "docstring": "Return an Axes object based on existence of row/col variables.", + "type": "method", + "file_path": "seaborn\\seaborn\\_base.py", + "ast_data": "FunctionDef name:_get_axes arg:self arg:sub_vars arguments arg arg Assign Call Assign Call If BoolOp Compare Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "errmess", + "source_code": "def errmess(s: str) -> None:\n if sys.stderr is not None:\n sys.stderr.write(s)", + "docstring": "Write an error message to stderr. This indirection is needed because sys.stderr might not always be available (see #26862).", + "type": "function", + "file_path": "numpy\\numpy\\f2py\\cfuncs.py", + "ast_data": "FunctionDef name:errmess arg:s arguments arg If Compare Call" + }, + { + "library": "pytorch", + "name": "clear", + "source_code": "def clear(self) -> None:\n self.start_callbacks.clear()\n self.end_callbacks.clear()", + "docstring": "Clear all registered callbacks.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\callback.py", + "ast_data": "FunctionDef name:clear arg:self arguments arg Call Call" + }, + { + "library": "pygame", + "name": "get_init", + "source_code": "def get_init():\n return _ft_init", + "docstring": "get_init() -> bool returns True if the fastevent module is currently initialized", + "type": "function", + "file_path": "pygame\\src_py\\fastevent.py", + "ast_data": "FunctionDef name:get_init arguments Return return:yes" + }, + { + "library": "tensorflow", + "name": "from_proto", + "source_code": "def from_proto(self, proto):\n return self._object_factory(proto)", + "docstring": "Recreate a trackable object from a SavedUserObject proto.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\revived_types.py", + "ast_data": "FunctionDef name:from_proto arg:self arg:proto arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n return self._name", + "docstring": "The name of the staging area.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "__repr__", + "source_code": "def __repr__(self) -> str:\n repr = f'amount={self.amount}, salt_and_pepper={self.salt_and_pepper}'\n return repr", + "docstring": "Return a string representation of the object.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\salt_pepper_noise.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes" + }, + { + "library": "scipy", + "name": "checkbreak_con", + "source_code": "def checkbreak_con(maxfun, nf, cstrv, ctol, f, ftarget, x):\n info = INFO_DEFAULT\n srname = 'CHECKbreak_CON'\n assert INFO_DEFAULT not in [NAN_INF_X, NAN_INF_F, FTARGET_ACHIEVED, MAXFUN_REACHED], f'NAN_INF_X, NAN_INF_F, FTARGET_ACHIEVED, and MAXFUN_REACHED differ from INFO_DFT {srname}'\n assert not any(np.isnan(x)), f'X does not contain NaN {srname}'\n assert not (np.isnan(f) or np.isposinf(f) or np.isnan(cstrv) or np.isposinf(cstrv)), f'F or CSTRV is not NaN/+Inf 
{srname}'\n if any(np.isnan(x)) or any(np.isinf(x)):\n info = NAN_INF_X\n if np.isnan(f) or np.isposinf(f) or np.isnan(cstrv) or np.isposinf(cstrv):\n info = NAN_INF_F\n if cstrv <= ctol and f <= ftarget:\n info = FTARGET_ACHIEVED\n if nf >= maxfun:\n info = MAXFUN_REACHED\n return info", + "docstring": "This module checks whether to break out of the solver loop in the constrained case.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\checkbreak.py", + "ast_data": "FunctionDef name:checkbreak_con arg:maxfun arg:nf arg:cstrv arg:ctol arg:f arg:ftarget arg:x arguments arg arg arg arg arg arg arg Assign Assign Compare Call Call BoolOp Call Call Call Call If BoolOp Call Call Call Call Assign If BoolOp Call Call Call Call Assign If BoolOp Compare Compare Assign If Compare Assign Return return:yes" + }, + { + "library": "scrapy", + "name": "ScrapyDeprecationWarning", + "source_code": "class ScrapyDeprecationWarning(Warning):\n pass", + "docstring": "Warning category for deprecated features, since the default DeprecationWarning is silenced on Python 2.7+", + "type": "class", + "file_path": "scrapy\\scrapy\\exceptions.py", + "ast_data": "ClassDef name:ScrapyDeprecationWarning" + }, + { + "library": "pytorch", + "name": "get_input_node_symbols", + "source_code": "def get_input_node_symbols(node: Union[ir.IRNode, sympy.Expr, ir.TorchBindObject]) -> OrderedSet[sympy.Symbol]:\n if isinstance(node, ir.TorchBindObject):\n return OrderedSet()\n elif isinstance(node, ir.IRNode):\n return get_layout_symints(node)\n else:\n raise NotImplementedError(f'Unsupported input node type: {type(node)}')", + "docstring": "Gets symbols used in input node shapes, strides, and offsets.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:get_input_node_symbols arg:node arguments arg If Call Return return:yes Call If Call Return return:yes Call Raise Call Call" + }, + { + "library": "scipy", + "name": "cpu_count", + "source_code": "def cpu_count(only_physical_cores=False):\n os_cpu_count = os.cpu_count() or 1\n if sys.platform == 'win32':\n os_cpu_count = min(os_cpu_count, _MAX_WINDOWS_WORKERS)\n cpu_count_user = _cpu_count_user(os_cpu_count)\n aggregate_cpu_count = max(min(os_cpu_count, cpu_count_user), 1)\n if not only_physical_cores:\n return aggregate_cpu_count\n if cpu_count_user < os_cpu_count:\n return max(cpu_count_user, 1)\n cpu_count_physical, exception = _count_physical_cores()\n if cpu_count_physical != 'not found':\n return cpu_count_physical\n if exception is not None:\n warnings.warn(f'Could not find the number of physical cores for the following reason:\\n{exception}\\nReturning the number of logical cores instead. You can silence this warning by setting LOKY_MAX_CPU_COUNT to the number of cores you want to use.', stacklevel=2)\n traceback.print_tb(exception.__traceback__)\n return aggregate_cpu_count", + "docstring": "Return the number of CPUs the current process can use. The returned number of CPUs accounts for: * the number of CPUs in the system, as given by `` is True, return the number of physical cores instead of the number of logical cores (hyperthreading / SMT). Note that this option is not enforced if the number of usable cores is controlled in any other way such as: process affinity, Cgroup restricted CPU bandwidth or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical cores is not found, return the number of logical cores. 
Note that on Windows, the returned number of CPUs cannot exceed 61, see: It is also always larger or equal to 1.", + "type": "function", + "file_path": "scipy\\dev.py", + "ast_data": "FunctionDef name:cpu_count arg:only_physical_cores arguments arg Assign BoolOp Call If Compare Assign Call Assign Call Assign Call Call If Return return:yes If Compare Return return:yes Call Assign Call If Compare Return return:yes If Compare Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "sum_duplicates", + "source_code": "def sum_duplicates(self):\n if self.has_canonical_format:\n return\n self.sort_indices()\n R, C = self.blocksize\n M, N = self.shape\n n_row = M // R\n nnz = 0\n row_end = 0\n for i in range(n_row):\n jj = row_end\n row_end = self.indptr[i + 1]\n while jj < row_end:\n j = self.indices[jj]\n x = self.data[jj]\n jj += 1\n while jj < row_end and self.indices[jj] == j:\n x += self.data[jj]\n jj += 1\n self.indices[nnz] = j\n self.data[nnz] = x\n nnz += 1\n self.indptr[i + 1] = nnz\n self.prune()\n self.has_canonical_format = True", + "docstring": "Eliminate duplicate array/matrix entries by adding them together The is an *in place* operation", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_bsr.py", + "ast_data": "FunctionDef name:sum_duplicates arg:self arguments arg If Return return:no Call Assign Assign Assign Assign Assign For Call Assign Assign While Compare Assign Assign While BoolOp Compare Compare Assign Assign Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "apply_to_single_assignments", + "source_code": "def apply_to_single_assignments(self, targets, values, apply_fn):\n if not isinstance(targets, (list, tuple)):\n targets = (targets,)\n for target in targets:\n if isinstance(target, (gast.Tuple, gast.List)):\n for i in range(len(target.elts)):\n target_el = target.elts[i]\n if isinstance(values, (gast.Tuple, gast.List)):\n value_el = values.elts[i]\n else:\n value_el = gast.Subscript(values, i, ctx=gast.Store())\n self.apply_to_single_assignments(target_el, value_el, apply_fn)\n else:\n apply_fn(target, values)", + "docstring": "Applies a function to each individual assignment. This function can process a possibly-unpacked (e.g. a, b = c, d) assignment. It tries to break down the unpacking if possible. In effect, it has the same effect as passing the assigned values in SSA form to apply_fn. Examples: The following will result in apply_fn(a, c), apply_fn(b, d): a, b = c, d The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]): a, b = c The following will result in apply_fn(a, (b, c)): a = b, c It uses the visitor pattern to allow subclasses to process single assignments individually. Args: targets: list, tuple of or individual AST node. Should be used with the targets field of an ast.Assign node. values: an AST node. apply_fn: a function of a single argument, which will be called with the respective nodes of each single assignment. 
The signature is apply_fn(target, value), no return value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transformer.py", + "ast_data": "FunctionDef name:apply_to_single_assignments arg:self arg:targets arg:values arg:apply_fn arguments arg arg arg arg If Call Assign For If Call For Call Call Assign If Call Assign Assign Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "set_color", + "source_code": "def set_color(self, c):\n self.set_edgecolor(c)\n self.set_hatchcolor(c)\n self.set_facecolor(c)", + "docstring": "Set both the edgecolor and the facecolor. Parameters ---------- c : :mpltype: See Also -------- Patch.set_facecolor, Patch.set_edgecolor For setting the edge or face color individually.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:set_color arg:self arg:c arguments arg arg Call Call Call" + }, + { + "library": "pytorch", + "name": "Conv1d", + "source_code": "class Conv1d(_ConvNd, nn.Conv1d):\n _FLOAT_MODULE: ClassVar[type[nn.Conv1d]] = nn.Conv1d\n _FLOAT_CONV_MODULE: ClassVar[type[nn.Conv1d]] = nn.Conv1d\n\n def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_1_t, stride: _size_1_t=1, padding: Union[str, _size_1_t]=0, dilation: _size_1_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', qconfig=None, device=None, dtype=None) -> None:\n kernel_size_ = _single(kernel_size)\n stride_ = _single(stride)\n padding_ = padding if isinstance(padding, str) else _single(padding)\n dilation_ = _single(dilation)\n super().__init__(in_channels, out_channels, kernel_size_, stride=stride_, padding=padding_, dilation=dilation_, transposed=False, output_padding=_single(0), groups=groups, bias=bias, padding_mode=padding_mode, qconfig=qconfig, device=device, dtype=dtype)\n\n @classmethod\n def from_float(cls, mod, use_precomputed_fake_quant=False):\n return super().from_float(cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant)", + "docstring": "A Conv1d module attached with FakeQuantize modules for weight, used for quantization aware training. We adopt the same interface as :class: Similar to :class:, with FakeQuantize modules initialized to default. Attributes: weight_fake_quant: fake quant module for weight", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\qat\\modules\\conv.py", + "ast_data": "ClassDef name:Conv1d FunctionDef name:__init__ arg:self arg:in_channels arg:out_channels arg:kernel_size arg:stride arg:padding arg:dilation arg:groups arg:bias arg:padding_mode arg:qconfig arg:device arg:dtype arguments arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Call Call Call FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "__next__", + "source_code": "def __next__(self):\n next(self._it)\n return self._it.multi_index", + "docstring": "Standard iterator method, updates the index and returns the index tuple. 
Returns ------- val : tuple of ints Returns a tuple containing the indices of the current iteration.", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_index_tricks_impl.py", + "ast_data": "FunctionDef name:__next__ arg:self arguments arg Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "add_cell", + "source_code": "def add_cell(self, row, col, *args, **kwargs):\n xy = (0, 0)\n cell = Cell(xy, *args, visible_edges=self.edges, **kwargs)\n self[row, col] = cell\n return cell", + "docstring": "Create a cell and add it to the table. Parameters ---------- row : int Row index. col : int Column index. *args, **kwargs All other parameters are passed on to . Returns ------- The created cell.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\table.py", + "ast_data": "FunctionDef name:add_cell arg:self arg:row arg:col arguments arg arg arg arg arg Assign Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "FakeOptimizerForRestoration", + "source_code": "class FakeOptimizerForRestoration(trackable.Trackable):\n\n def __init__(self, optimizer):\n self._optimizer = optimizer\n\n def get_slot_names(self):\n return self._optimizer.get_slot_names()\n\n def _create_or_restore_slot_variable(self, slot_variable_position, slot_name, variable):\n return self._optimizer._create_or_restore_slot_variable(slot_variable_position, slot_name, variable)", + "docstring": "A fake optimizer used to support restoring TensorFlow 2.2 checkpoints. The checkpoint format for LossScaleOptimizers changed after TF 2.2. This class exists to support restoring TF 2.2 checkpoints in newer version of TensorFlow. In TF 2.2, LossScaleOptimizer would track the wrapped optimizer by calling the following in LossScaleOptimizer.__init__ This means a dependency from the LossScaleOptimizer to the wrapped optimizer would be stored in the checkpoint. However now, the checkpoint format with a LossScaleOptimizer is the same as the format without a LossScaleOptimizer, except the loss scale is also stored. This means there is no dependency from the LossScaleOptimizer to the wrapped optimizer. Instead, the LossScaleOptimizer acts as if it is the wrapped optimizer, from a checkpoint's perspective, by overriding all Trackable methods and delegating them to the wrapped optimizer. To allow restoring TF 2.2. checkpoints, LossScaleOptimizer adds a dependency on this class instead of the inner optimizer. When restored, this class will instead restore the slot variables of the inner optimizer. 
Since this class has no variables, it does not affect the checkpoint when saved.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "ClassDef name:FakeOptimizerForRestoration FunctionDef name:__init__ arg:self arg:optimizer arguments arg arg Assign FunctionDef name:get_slot_names arg:self arguments arg Return return:yes Call FunctionDef name:_create_or_restore_slot_variable arg:self arg:slot_variable_position arg:slot_name arg:variable arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "benchmark_fused_nodes", + "source_code": "def benchmark_fused_nodes(self, nodes, n_spills_threshold=8) -> tuple[float, str]:\n src_code = self.generate_kernel_code_from_nodes(nodes, benchmark_kernel=True)\n mod = PyCodeCache.load(src_code)\n return self.benchmark_codegened_module(mod, n_spills_threshold, node_names=OrderedSet((n.get_name() for n in nodes)))", + "docstring": "Benchmark fused list of nodes and return the execution time in milliseconds on randomly generated inputs.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py", + "ast_data": "FunctionDef name:benchmark_fused_nodes arg:self arg:nodes arg:n_spills_threshold arguments arg arg arg Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "extract_variable_info", + "source_code": "def extract_variable_info(kwargs: Any) -> Tuple[str, Tuple[int, ...], dtypes.DType, Callable[[], Any], Optional[int]]:\n\n def get_restore_uid(initial_value: Callable[..., Any]) -> int | None:\n return getattr(initial_value, 'restore_uid', None)\n if isinstance(kwargs['initial_value'], functools.partial) and ('shape' in kwargs['initial_value'].keywords or kwargs['initial_value'].args):\n if 'shape' in kwargs['initial_value'].keywords:\n shape = kwargs['initial_value'].keywords['shape']\n else:\n shape = kwargs['initial_value'].args[0]\n return (kwargs['name'], shape, kwargs['initial_value'].keywords.get('dtype', kwargs['dtype']), kwargs['initial_value'].func, get_restore_uid(kwargs['initial_value'].func))\n elif 'shape' not in kwargs or kwargs['shape'] is None or (not callable(kwargs['initial_value'])):\n raise ValueError('Unable to extract initializer function and shape from {}. Please either pass a function that expects a shape and dtype as the initial value for your variable or functools.partial object with the shape and dtype kwargs set. This is needed so that we can initialize the shards of the ShardedVariable locally.'.format(kwargs['initial_value']))\n else:\n return (kwargs['name'], kwargs['shape'], kwargs['dtype'], kwargs['initial_value'], get_restore_uid(kwargs['initial_value']))", + "docstring": "Extracts the variable creation attributes from the kwargs. Args: kwargs: a dict of keyword arguments that were passed to a variable creator scope. 
Returns: A tuple of variable name, shape, dtype, initialization function, restore_uid.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", + "ast_data": "FunctionDef name:extract_variable_info arg:kwargs arguments arg FunctionDef name:get_restore_uid arg:initial_value arguments arg Return return:yes Call If BoolOp Call BoolOp Compare If Compare Assign Assign Return return:yes Call Call If BoolOp Compare Compare Call Raise Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_form_kwargs", + "source_code": "def get_form_kwargs(self):\n kwargs = {'initial': self.get_initial(), 'prefix': self.get_prefix()}\n if self.request.method in ('POST', 'PUT'):\n kwargs.update({'data': self.request.POST, 'files': self.request.FILES})\n return kwargs", + "docstring": "Return the keyword arguments for instantiating the form.", + "type": "method", + "file_path": "django\\django\\views\\generic\\edit.py", + "ast_data": "FunctionDef name:get_form_kwargs arg:self arguments arg Assign Call Call If Compare Call Return return:yes" + }, + { + "library": "django", + "name": "get_host", + "source_code": "def get_host(self):\n host = self._get_raw_host()\n allowed_hosts = settings.ALLOWED_HOSTS\n if settings.DEBUG and (not allowed_hosts):\n allowed_hosts = ['.localhost', '127.0.0.1', '[::1]']\n domain, port = split_domain_port(host)\n if domain and validate_host(domain, allowed_hosts):\n return host\n else:\n msg = 'Invalid HTTP_HOST header: %r.' % host\n if domain:\n msg += ' You may need to add %r to ALLOWED_HOSTS.' % domain\n else:\n msg += ' The domain name provided is not valid according to RFC 1034/1035.'\n raise DisallowedHost(msg)", + "docstring": "Return the HTTP host using the environment or request headers.", + "type": "method", + "file_path": "django\\django\\http\\request.py", + "ast_data": "FunctionDef name:get_host arg:self arguments arg Assign Call Assign If BoolOp Assign Assign Call If BoolOp Call Return return:yes Assign If Raise Call" + }, + { + "library": "pandas", + "name": "is_label_like", + "source_code": "def is_label_like(key) -> bool:\n return not isinstance(key, slice) and (not is_list_like_indexer(key)) and (key is not Ellipsis)", + "docstring": "Returns ------- bool", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:is_label_like arg:key arguments arg Return return:yes BoolOp Call Call Compare" + }, + { + "library": "pytorch", + "name": "_initiate_registry_from_torchlib", + "source_code": "def _initiate_registry_from_torchlib(self) -> None:\n for meta in onnxscript_apis.get_torchlib_ops():\n internal_name_instance = registration.OpName.from_qualified_name(meta.qualified_name)\n symbolic_function = registration.ONNXFunction(onnx_function=meta.function, op_full_name=internal_name_instance.qualified_name(), is_custom=False, is_complex=meta.is_complex)\n self._register(internal_name_instance, symbolic_function)", + "docstring": "Populates the registry with ATen functions from torchlib. 
Args: torchlib_registry: The torchlib registry to use for populating the registry.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py", + "ast_data": "FunctionDef name:_initiate_registry_from_torchlib arg:self arguments arg For Call Assign Call Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "write_results_to_csv", + "source_code": "def write_results_to_csv(experiments: list[Experiment], output_dir: str='benchmark_results'):\n import csv\n import os\n from datetime import datetime\n os.makedirs(output_dir, exist_ok=True)\n timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')\n filename = os.path.join(output_dir, f'benchmark_results_{timestamp}.csv')\n if not experiments:\n return\n fieldnames = list(experiments[0].asdict().keys())\n if 'device' in fieldnames:\n fieldnames.remove('device')\n with open(filename, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for experiment in experiments:\n row = experiment.asdict()\n if 'device' in row:\n del row['device']\n writer.writerow(row)\n print(f'Results written to: {filename}')", + "docstring": "Write experiment results to a CSV file in the specified directory. The filename includes a timestamp for uniqueness.", + "type": "function", + "file_path": "pytorch\\benchmarks\\transformer\\sdpa.py", + "ast_data": "FunctionDef name:write_results_to_csv arg:experiments arg:output_dir arguments arg arg Call Assign Call Call Assign Call If Return return:no Assign Call Call Call If Compare Call With Call Assign Call Call For Assign Call If Compare Call Call" + }, + { + "library": "tensorflow", + "name": "__getitem__", + "source_code": "def __getitem__(self, key):\n if isinstance(key, list):\n key = tuple(key)\n elif not isinstance(key, tuple):\n key = (key,)\n if not key:\n return self\n if self.rank == 0:\n return self._scalar_getitem(key)\n else:\n return self._tensor_getitem(key)", + "docstring": "Returns the specified piece of this StructuredTensor. * If is scalar (i.e., a single structure), then returns the value of field (where must be a string). * If is non-scalar (i.e., a vector or higher-dimensional tensor of structures), selects an element or slice of the tensor using standard Python semantics (e.g., negative values index from the end). may have any of the following types: * constant * constant * scalar integer * containing integer constants and/or scalar integer s #### Multidimensional indexing supports multidimensional indexing. I.e., may be a of values, indexing or slicing multiple dimensions at once. For example, if is a vector of structures, each of which has a vector- valued field, then is equivalent to ; and will return a (possibly ragged) matrix of names, with shape . Args: key: Indicates which piece of the StructuredTensor to return. Returns: A , , or .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg If Call Assign Call If Call Assign If Return return:yes If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "minor", + "source_code": "@property\ndef minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0", + "docstring": "The second item of :attr: or `` if unavailable. 
>>> Version(\"1.2.3\").minor 2 >>> Version(\"1\").minor 0", + "type": "method", + "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py", + "ast_data": "FunctionDef name:minor arg:self arguments arg Return return:yes Compare Call" + }, + { + "library": "pygame", + "name": "get_surface", + "source_code": "def get_surface(self, dest_surf=None):\n abuffer, width, height = self.get_buffer()\n if not abuffer:\n return None\n surf = pygame.image.frombuffer(abuffer, (width, height), 'BGR')\n surf = pygame.transform.flip(surf, 0, 1)\n if dest_surf:\n dest_surf.blit(surf, (0, 0))\n else:\n dest_surf = surf\n return dest_surf", + "docstring": "Returns a pygame Surface.", + "type": "method", + "file_path": "pygame\\src_py\\_camera_vidcapture.py", + "ast_data": "FunctionDef name:get_surface arg:self arg:dest_surf arguments arg arg Assign Call If Return return:no Assign Call Assign Call If Call Assign Return return:yes" + }, + { + "library": "cryptography", + "name": "_get_u32", + "source_code": "def _get_u32(data: memoryview) -> tuple[int, memoryview]:\n if len(data) < 4:\n raise ValueError('Invalid data')\n return (int.from_bytes(data[:4], byteorder='big'), data[4:])", + "docstring": "Uint32", + "type": "function", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py", + "ast_data": "FunctionDef name:_get_u32 arg:data arguments arg If Compare Call Raise Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_pkl_filepath", + "source_code": "def _pkl_filepath(*args, **kwargs):\n py3_suffix = kwargs.get('py3_suffix', '_py3')\n basename, ext = splitext(args[-1])\n basename += py3_suffix\n new_args = args[:-1] + (basename + ext,)\n return join(*new_args)", + "docstring": "Return filename for Python 3 pickles args[-1] is expected to be the \".pkl\" filename. For compatibility with older scikit-learn versions, a suffix is inserted before the extension. _pkl_filepath('/path/to/folder', 'filename.pkl') returns '/path/to/folder/filename_py3.pkl'", + "type": "function", + "file_path": "scikit-learn\\sklearn\\datasets\\_base.py", + "ast_data": "FunctionDef name:_pkl_filepath arguments arg arg Assign Call Assign Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "validate_strides", + "source_code": "def validate_strides(strides: StrideType):\n assert isinstance(strides, Sequence)\n for stride in strides:\n assert stride >= 0", + "docstring": "Verifies the object specifies valid strides.", + "type": "function", + "file_path": "pytorch\\torch\\_prims_common\\__init__.py", + "ast_data": "FunctionDef name:validate_strides arg:strides arguments arg Call For Compare" + }, + { + "library": "tensorflow", + "name": "partial_batch_size", + "source_code": "@abc.abstractmethod\ndef partial_batch_size(self):\n raise NotImplementedError", + "docstring": "The size of the final partial batch for dataset. Will return None if has_partial_batch is False or batch_size is None.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", + "ast_data": "FunctionDef name:partial_batch_size arg:self arguments arg Raise" + }, + { + "library": "tensorflow", + "name": "encoding_specs", + "source_code": "def encoding_specs(self, spec):\n return spec._component_specs", + "docstring": "Returns a list of (s) describing the encoding for . See for a description of the default encoding. Subclasses may override this default definition, when necessary. Args: spec: The TypeSpec whose encoding should be described. 
Returns: A nest (as defined by tf.TypeSpecself.encode(spec, ...)`. All TypeSpecs in this nest must be batchable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py", + "ast_data": "FunctionDef name:encoding_specs arg:self arg:spec arguments arg arg Return return:yes" + }, + { + "library": "numpy", + "name": "legweight", + "source_code": "def legweight(x):\n w = x * 0.0 + 1.0\n return w", + "docstring": "Weight function of the Legendre polynomials. The weight function is :math: and the interval of integration is :math:. The Legendre polynomials are orthogonal, but not normalized, with respect to this weight function. Parameters ---------- x : array_like Values at which the weight function will be computed. Returns ------- w : ndarray The weight function at .", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\legendre.py", + "ast_data": "FunctionDef name:legweight arg:x arguments arg Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "default", + "source_code": "@expose\ndef default(self, *vpath, **params):\n rpcparams, rpcmethod = _xmlrpc.process_body()\n subhandler = self\n for attr in str(rpcmethod).split('.'):\n subhandler = getattr(subhandler, attr, None)\n if subhandler and getattr(subhandler, 'exposed', False):\n body = subhandler(*vpath + rpcparams, **params)\n else:\n raise Exception('method \"%s\" is not supported' % attr)\n conf = cherrypy.serving.request.toolmaps['tools'].get('xmlrpc', {})\n _xmlrpc.respond(body, conf.get('encoding', 'utf-8'), conf.get('allow_none', 0))\n return cherrypy.serving.response.body", + "docstring": "Process the unhandled XML-RPC methods.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cptools.py", + "ast_data": "FunctionDef name:default arg:self arguments arg arg arg Assign Call Assign For Call Call Assign Call If BoolOp Call Assign Call Raise Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "CCompiler_cxx_compiler", + "source_code": "def CCompiler_cxx_compiler(self):\n if self.compiler_type in ('msvc', 'intelw', 'intelemw'):\n return self\n cxx = copy(self)\n cxx.compiler_cxx = cxx.compiler_cxx\n cxx.compiler_so = [cxx.compiler_cxx[0]] + sanitize_cxx_flags(cxx.compiler_so[1:])\n if sys.platform.startswith(('aix', 'os400')) and 'ld_so_aix' in cxx.linker_so[0]:\n cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] + cxx.linker_so[2:]\n if sys.platform.startswith('os400'):\n cxx.compiler_so.append('-D__STDC_FORMAT_MACROS')\n cxx.compiler_so.append('-fno-extern-tls-init')\n cxx.linker_so.append('-fno-extern-tls-init')\n else:\n cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:]\n return cxx", + "docstring": "Return the C++ compiler. Parameters ---------- None Returns ------- cxx : class instance The C++ compiler, as a `` instance.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\ccompiler.py", + "ast_data": "FunctionDef name:CCompiler_cxx_compiler arg:self arguments arg If Compare Return return:yes Assign Call Assign Assign Call If BoolOp Call Compare Assign If Call Call Call Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_interpolation", + "source_code": "def set_interpolation(self, s):\n s = mpl._val_or_rc(s, 'image.interpolation').lower()\n _api.check_in_list(interpolations_names, interpolation=s)\n self._interpolation = s\n self.stale = True", + "docstring": "Set the interpolation method the image uses when resizing. If None, use :rc:. 
If 'none', the image is shown as is without interpolating. 'none' is only supported in agg, ps and pdf backends and will fall back to 'nearest' mode for other backends. Parameters ---------- s : {'auto', 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', 'none'} or None", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:set_interpolation arg:self arg:s arguments arg arg Assign Call Call Call Assign Assign" + }, + { + "library": "scipy", + "name": "_check_shape", + "source_code": "def _check_shape(argshape, size):\n scalar_shape = []\n bc = []\n for argdim, sizedim in zip_longest(argshape[::-1], size[::-1], fillvalue=1):\n if sizedim > argdim or argdim == sizedim == 1:\n scalar_shape.append(sizedim)\n bc.append(True)\n else:\n bc.append(False)\n return (tuple(scalar_shape[::-1]), tuple(bc[::-1]))", + "docstring": "This is a utility function used by in the class geninvgauss_gen. It compares the tuple argshape to the tuple size. Parameters ---------- argshape : tuple of integers Shape of the arguments. size : tuple of integers or integer Size argument of rvs(). Returns ------- The function returns two tuples, scalar_shape and bc. scalar_shape : tuple Shape to which the 1-d array of random variates returned by _rvs_scalar() is converted when it is copied into the output array of _rvs(). bc : tuple of booleans bc is an tuple the same length as size. bc[j] is True if the data associated with that index is generated in one call of _rvs_scalar().", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:_check_shape arg:argshape arg:size arguments arg arg Assign Assign For Call If BoolOp Compare Compare Call Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "parse_arguments", + "source_code": "def parse_arguments() -> argparse.Namespace:\n parser = argparse.ArgumentParser(description=\"Download and apply a Pull Request (PR) patch from the PyTorch GitHub repository to your local PyTorch installation.\\n\\nBest Practice: Since this script involves hot-patching PyTorch, it's recommended to use a disposable environment like a Docker container or a dedicated Python virtual environment (venv). This ensures that if the patching fails, you can easily recover by resetting the environment.\", epilog='Example:\\n python nightly_hotpatch.py 12345\\n python nightly_hotpatch.py 12345 --directory /path/to/pytorch --strip 1\\n\\nThese commands will download the patch for PR #12345 and apply it to your local PyTorch installation.', formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('PR_NUMBER', type=int, help='The number of the Pull Request (PR) from the PyTorch GitHub repository to download and apply as a patch.')\n parser.add_argument('--directory', '-d', type=str, default=None, help='Optional. Specify the target directory to apply the patch. If not provided, the script will use the PyTorch installation path.')\n parser.add_argument('--strip', '-p', type=int, default=1, help='Optional. Specify the strip count to remove leading directories from file paths in the patch. Default is 1.')\n return parser.parse_args()", + "docstring": "Parses command-line arguments using argparse. 
Returns: argparse.Namespace: The parsed arguments containing the PR number, optional target directory, and strip count.", + "type": "function", + "file_path": "pytorch\\tools\\nightly_hotpatch.py", + "ast_data": "FunctionDef name:parse_arguments arguments Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.interpd\ndef __init__(self, bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, **kwargs):\n if 'transform' in kwargs:\n raise ValueError('transform should not be set')\n super().__init__(bbox1, bbox2, loc1a, loc2a, **kwargs)\n self.loc1b = loc1b\n self.loc2b = loc2b", + "docstring": "Connect two bboxes with a quadrilateral. The quadrilateral is specified by two lines that start and end at corners of the bboxes. The four sides of the quadrilateral are defined by the two lines given, the line between the two corners specified in *bbox1* and the line between the two corners specified in *bbox2*. Parameters ---------- bbox1, bbox2 : Bounding boxes to connect. loc1a, loc2a, loc1b, loc2b : {1, 2, 3, 4} The first line connects corners *loc1a* of *bbox1* and *loc2a* of *bbox2*; the second line connects corners *loc1b* of *bbox1* and *loc2b* of *bbox2*. Valid values are:: 'upper right' : 1, 'upper left' : 2, 'lower left' : 3, 'lower right' : 4 **kwargs Patch properties for the line drawn: %(Patch:kwdoc)s", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\inset_locator.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:bbox1 arg:bbox2 arg:loc1a arg:loc2a arg:loc1b arg:loc2b arguments arg arg arg arg arg arg arg arg If Compare Raise Call Call Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "from_nested_row_splits", + "source_code": "@classmethod\n@dispatch.add_dispatch_support\ndef from_nested_row_splits(cls, flat_values, nested_row_splits, name=None, validate=True):\n if not isinstance(validate, bool):\n raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')\n if isinstance(nested_row_splits, tensor_lib.Tensor):\n raise TypeError(f'Argument `nested_row_splits` must be a list of Tensors. Received {nested_row_splits}.')\n with ops.name_scope(name, 'RaggedFromNestedRowSplits', [flat_values] + list(nested_row_splits)):\n result = flat_values\n for splits in reversed(nested_row_splits):\n result = cls.from_row_splits(result, splits, validate=validate)\n return result", + "docstring": "Creates a from a nested list of tensors. Equivalent to: Args: flat_values: A potentially ragged tensor. nested_row_splits: A list of 1-D integer tensors. The th tensor is used as the for the th ragged dimension. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid . Note: these assertions incur a runtime cost, since they must be checked for each tensor value. 
Returns: A (or if is empty).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", + "ast_data": "FunctionDef name:from_nested_row_splits arg:cls arg:flat_values arg:nested_row_splits arg:name arg:validate arguments arg arg arg arg arg If Call Raise Call If Call Raise Call With Call Call Assign For Call Assign Call Return return:yes" + }, + { + "library": "seaborn", + "name": "z_score", + "source_code": "@staticmethod\ndef z_score(data2d, axis=1):\n if axis == 1:\n z_scored = data2d\n else:\n z_scored = data2d.T\n z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n if axis == 1:\n return z_scored\n else:\n return z_scored.T", + "docstring": "Standarize the mean and variance of the data axis Parameters ---------- data2d : pandas.DataFrame Data to normalize axis : int Which axis to normalize across. If 0, normalize across rows, if 1, normalize across columns. Returns ------- normalized : pandas.DataFrame Noramlized data with a mean of 0 and variance of 1 across the specified axis.", + "type": "method", + "file_path": "seaborn\\seaborn\\matrix.py", + "ast_data": "FunctionDef name:z_score arg:data2d arg:axis arguments arg arg If Compare Assign Assign Assign Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "get_fusion_candidates", + "source_code": "def get_fusion_candidates(rule: GroupBatchFusionBase, root_node: torch.fx.Node, fused_set: OrderedSet[torch.fx.Node]) -> collections.defaultdict[Any, list[torch.fx.Node]]:\n q: collections.deque[tuple[int, torch.fx.Node]] = collections.deque()\n candidate_dict: collections.defaultdict[Any, list[torch.fx.Node]] = collections.defaultdict(list)\n if root_node.target in SEARCH_EXCLUSIONS:\n return candidate_dict\n visited_set = OrderedSet[torch.fx.Node]()\n for next_node in root_node.all_input_nodes:\n q.append((1, next_node))\n visited_set.add(next_node)\n while len(q) > 0:\n depth, node = q.popleft()\n if node in fused_set:\n continue\n key = rule.match(node)\n if key is not None:\n candidate_nodes = candidate_dict[key]\n if node not in candidate_nodes:\n candidate_nodes.append(node)\n elif depth < rule.graph_search_options['max_fuse_search_depth']:\n for next_node in node.all_input_nodes:\n if next_node not in visited_set:\n visited_set.add(next_node)\n q.append((depth + 1, next_node))\n return candidate_dict", + "docstring": "Search fusion candidates for a specific rule using BFS starting from the root node. We only search the subgraph within graph_search_options[\"max_fuse_search_depth\"].", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\group_batch_fusion.py", + "ast_data": "FunctionDef name:get_fusion_candidates arg:rule arg:root_node arg:fused_set arguments arg arg arg Call Call If Compare Return return:yes Assign Call For Call Call While Compare Call Assign Call If Compare Assign Call If Compare Assign If Compare Call If Compare For If Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get", + "source_code": "def get(identifier):\n if isinstance(identifier, dict):\n return deserialize(identifier)\n elif isinstance(identifier, str):\n return deserialize(str(identifier))\n elif callable(identifier):\n return identifier\n else:\n raise ValueError('Could not interpret metric function identifier: {}'.format(identifier))", + "docstring": "Retrieves a Keras metric as a / class instance. The may be the string name of a metric function or class. 
>>> metric = tf.keras.metrics.get(\"categorical_crossentropy\") >>> type(metric) >>> metric = tf.keras.metrics.get(\"CategoricalCrossentropy\") >>> type(metric) You can also specify of the metric to this function by passing dict containing and as an identifier. Also note that the must map to a class >>> identifier = {\"class_name\": \"CategoricalCrossentropy\", ... \"config\": {\"from_logits\": True}} >>> metric = tf.keras.metrics.get(identifier) >>> type(metric) Args: identifier: A metric identifier. One of None or string name of a metric function/class or metric configuration dictionary or a metric function or a metric class instance Returns: A Keras metric as a / class instance. Raises: ValueError: If cannot be interpreted.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "FunctionDef name:get arg:identifier arguments arg If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes Raise Call Call" + }, + { + "library": "pytorch", + "name": "_get_obs_or_fq_map", + "source_code": "def _get_obs_or_fq_map(edge_or_node_to_group_id: dict[EdgeOrNode, int], edge_or_node_to_qspec: dict[EdgeOrNode, QuantizationSpecBase], is_qat: bool) -> dict[EdgeOrNode, ObserverOrFakeQuantize]:\n obs_or_fq_map: dict[EdgeOrNode, ObserverOrFakeQuantize] = {}\n group_id_to_obs_or_fq: dict[int, ObserverOrFakeQuantize] = {}\n for edge_or_node, qspec in edge_or_node_to_qspec.items():\n group_id = edge_or_node_to_group_id[edge_or_node]\n if group_id not in group_id_to_obs_or_fq:\n group_id_to_obs_or_fq[group_id] = _create_obs_or_fq_from_qspec(qspec, obs_or_fq_map, is_qat)\n obs_or_fq_map[edge_or_node] = group_id_to_obs_or_fq[group_id]\n return obs_or_fq_map", + "docstring": "Generates the EdgeOrNode to observer/fake_quant instances Makes sure that for EdgeOrNode that has the same group_id should have the same observer or fake quant instances", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\prepare.py", + "ast_data": "FunctionDef name:_get_obs_or_fq_map arg:edge_or_node_to_group_id arg:edge_or_node_to_qspec arg:is_qat arguments arg arg arg For Call Assign If Compare Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_update_sample_weight_modes", + "source_code": "def _update_sample_weight_modes(self, sample_weights=None):\n if not self._is_compiled:\n return\n if sample_weights and any((s is not None for s in sample_weights)):\n for endpoint in self._training_endpoints:\n endpoint.sample_weight_mode = endpoint.sample_weight_mode or 'samplewise'\n else:\n for endpoint in self._training_endpoints:\n endpoint.sample_weight_mode = None", + "docstring": "Updates sample weight modes based on training/eval inputs. Sample weight placeholders will be created for all or no outputs based on whether sample_weight is provided for any output. If model contains we check if the input corresponds to the sample weight modes. 1. Set sample weight mode to be 'temporal' for output i, if sample_weight_mode was set to and sample weight inputs are given for one or more outputs. 2. Set sample weight mode to be 'samplewise' for output i, if sample_weight_mode was not set and sample weight inputs are given for one or more outputs. 3. Reset sample weight mode to None for output i if sample weight mode was set but there is no sample weight input. 
Args: sample_weights: List of sample weights of the same length as model outputs or None.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py", + "ast_data": "FunctionDef name:_update_sample_weight_modes arg:self arg:sample_weights arguments arg arg If Return return:no If BoolOp Call Compare For Assign BoolOp For Assign" + }, + { + "library": "pytorch", + "name": "get_backend", + "source_code": "def get_backend(group: Optional[ProcessGroup]=None) -> Backend:\n pg = group or _get_default_group()\n if _rank_not_in_group(pg):\n raise ValueError('Invalid process group specified')\n pg_store = _world.pg_map.get(pg, None)\n if pg_store is None:\n raise ValueError(f'Process group {pg} is not initialized in the world group map. Please initialize the group first.')\n return Backend(not_none(pg_store)[0])", + "docstring": "Return the backend of the given process group. Args: group (ProcessGroup, optional): The process group to work on. The default is the general main process group. If another specific group is specified, the calling process must be part of :attr:. Returns: The backend of the given process group as a lower case string.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:get_backend arg:group arguments arg Assign BoolOp Call If Call Raise Call Assign Call If Compare Raise Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "AutoLabels", + "source_code": "@dataclasses.dataclass(frozen=True)\nclass AutoLabels:\n runtime: RuntimeMode\n autograd: AutogradMode\n language: Language\n\n @property\n def as_dict(self) -> dict[str, str]:\n return {'runtime': self.runtime.value, 'autograd': self.autograd.value, 'language': 'Python' if self.language == Language.PYTHON else 'C++'}", + "docstring": "Labels for a TimerArgs instance which are inferred during unpacking.", + "type": "class", + "file_path": "pytorch\\benchmarks\\instruction_counts\\core\\api.py", + "ast_data": "ClassDef name:AutoLabels FunctionDef name:as_dict arg:self arguments arg Return return:yes Compare Call" + }, + { + "library": "sphinx", + "name": "supports", + "source_code": "def supports(self, format: str) -> bool:\n return True", + "docstring": "All format-specific elements are supported.", + "type": "method", + "file_path": "sphinx\\sphinx\\writers\\xml.py", + "ast_data": "FunctionDef name:supports arg:self arg:format arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, name: Text, num_replicas: int, pivot: ops.Operation):\n super(TPUReplicateContext, self).__init__()\n self._num_replicas = num_replicas\n self._outer_device_function_stack = None\n self._oc_dev_fn_stack = None\n self._outside_compilation_cluster = None\n self._is_map_outside_compilation = False\n self._outside_compilation_v2_context = None\n self._outside_compilation_counter = 0\n self._in_gradient_colocation = None\n self._gradient_colocation_stack = []\n self._host_compute_core = []\n self._name = name\n self._tpu_replicate_attr = attr_value_pb2.AttrValue(s=compat.as_bytes(self._name))\n self._unsupported_ops = []\n self._pivot = pivot\n self._replicated_vars = {}", + "docstring": "Builds a new TPUReplicateContext. Args: name: a unique name for the context, used to populate the attribute. num_replicas: an integer that gives the number of replicas for the computation. pivot: a pivot node. 
Nodes in the TPUReplicateContext that do not have any inputs will have a control dependency on the pivot node. This ensures that nodes are correctly included in any enclosing control flow contexts.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_replication.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:num_replicas arg:pivot arguments arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Call Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "_assert_all_paths_match", + "source_code": "def _assert_all_paths_match(values):\n paths = [_get_all_paths(st) for st in values]\n path_diff = set()\n for other_paths in paths[1:]:\n path_diff = path_diff.union(paths[0].symmetric_difference(other_paths))\n if path_diff:\n raise ValueError('Some paths are present in some, but not all, structured tensors: %r' % (path_diff,))", + "docstring": "Raises an error if the paths are not identical.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", + "ast_data": "FunctionDef name:_assert_all_paths_match arg:values arguments arg Assign Call Assign Call For Assign Call Call If Raise Call" + }, + { + "library": "scikit-learn", + "name": "requires_vector_input", + "source_code": "@property\ndef requires_vector_input(self):\n return self.k1.requires_vector_input or self.k2.requires_vector_input", + "docstring": "Returns whether the kernel is stationary.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:requires_vector_input arg:self arguments arg Return return:yes BoolOp" + }, + { + "library": "django", + "name": "ask_merge", + "source_code": "def ask_merge(self, app_label):\n return self.defaults.get('ask_merge', False)", + "docstring": "Should these migrations really be merged?", + "type": "method", + "file_path": "django\\django\\db\\migrations\\questioner.py", + "ast_data": "FunctionDef name:ask_merge arg:self arg:app_label arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_maybe_cast_inputs", + "source_code": "def _maybe_cast_inputs(self, inputs):\n compute_dtype = self._compute_dtype\n if self._autocast and compute_dtype and dtypes.as_dtype(compute_dtype).is_floating:\n\n def f(x):\n cast_types = (tensor.Tensor, sparse_tensor.SparseTensor, ragged_tensor.RaggedTensor)\n if isinstance(x, cast_types) and x.dtype.is_floating and (x.dtype.base_dtype.name != compute_dtype):\n return math_ops.cast(x, compute_dtype)\n elif isinstance(x, tensor.TensorSpec) and x.dtype.is_floating:\n return tensor.TensorSpec(x.shape, compute_dtype, x.name)\n else:\n return x\n return nest.map_structure(f, inputs)\n else:\n return inputs", + "docstring": "Maybe casts the inputs to the compute dtype. If self._compute_dtype is floating-point, and self_autocast is True, floating-point inputs are casted to self._compute_dtype. Args: inputs: Input tensor, or structure of input tensors. 
Returns: , but tensors may have been casted to self._compute_dtype", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:_maybe_cast_inputs arg:self arg:inputs arguments arg arg Assign If BoolOp Call FunctionDef name:f arg:x arguments arg Assign If BoolOp Call Compare Return return:yes Call If BoolOp Call Return return:yes Call Return return:yes Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_next", + "source_code": "def get_next(self):\n pass", + "docstring": "Unlike __next__, this may use a non-raising mechanism.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py", + "ast_data": "FunctionDef name:get_next arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "handle_existing_rendezvous", + "source_code": "def handle_existing_rendezvous(self, expected_version):\n active_state = self.announce_self_waiting(expected_version)\n logger.info('Added self to waiting list. Rendezvous full state: %s', active_state.value)\n self.wait_for_rendezvous_to_free(expected_version)\n logger.info('Previously existing rendezvous state changed. Will re-try joining.')", + "docstring": "Handle the case when there's an existing (state 'final) rendezvous already in place, and we have to announce ourselves waiting, and wait until the next rendezvous opportunity.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_rendezvous.py", + "ast_data": "FunctionDef name:handle_existing_rendezvous arg:self arg:expected_version arguments arg arg Assign Call Call Call Call" + }, + { + "library": "scipy", + "name": "__call__", + "source_code": "def __call__(self, z):\n z = np.asarray(z)\n zv = np.ravel(z)\n support_values = self._support_values.reshape((self._support_values.shape[0], -1))\n weights = self.weights[..., np.newaxis]\n with np.errstate(invalid='ignore', divide='ignore'):\n CC = 1 / np.subtract.outer(zv, self._support_points)\n r = CC @ (weights * support_values) / (CC @ weights)\n if np.any(np.isinf(zv)):\n r[np.isinf(zv)] = np.sum(weights * support_values) / np.sum(weights)\n ii = np.nonzero(np.isnan(r))[0]\n for jj in ii:\n if np.isnan(zv[jj]) or not np.any(zv[jj] == self._support_points):\n pass\n else:\n r[jj] = support_values[zv[jj] == self._support_points].squeeze()\n return np.reshape(r, z.shape + self._shape)", + "docstring": "Evaluate the rational approximation at given values. Parameters ---------- z : array_like Input values.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_bary_rational.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:z arguments arg arg Assign Call Assign Call Assign Call Assign With Call Assign Call Assign If Call Call Assign Call Call Call Assign Call Call For If BoolOp Call Call Compare Assign Call Compare Return return:yes Call" + }, + { + "library": "kornia", + "name": "_initialize_parameters", + "source_code": "def _initialize_parameters(self, pinholes: Iterable[PinholeCamera]) -> 'PinholeCamerasList':\n if not isinstance(pinholes, (list, tuple)):\n raise TypeError(f'pinhole must of type list or tuple. Got {type(pinholes)}')\n height, width = ([], [])\n intrinsics, extrinsics = ([], [])\n for pinhole in pinholes:\n if not isinstance(pinhole, PinholeCamera):\n raise TypeError(f'Argument pinhole must be from type PinholeCamera. 
Got {type(pinhole)}')\n height.append(pinhole.height)\n width.append(pinhole.width)\n intrinsics.append(pinhole.intrinsics)\n extrinsics.append(pinhole.extrinsics)\n self.height: Tensor = stack(height, dim=1)\n self.width: Tensor = stack(width, dim=1)\n self._intrinsics: Tensor = stack(intrinsics, dim=1)\n self._extrinsics: Tensor = stack(extrinsics, dim=1)\n return self", + "docstring": "Initialise the class attributes given a cameras list.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", + "ast_data": "FunctionDef name:_initialize_parameters arg:self arg:pinholes arguments arg arg If Call Raise Call Call Assign Assign For If Call Raise Call Call Call Call Call Call Call Call Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "distort", + "source_code": "def distort(self, params: Tensor, points: Vector2) -> Vector2:\n fx, fy, cx, cy = (params[..., 0], params[..., 1], params[..., 2], params[..., 3])\n u = points.x * fx + cx\n v = points.y * fy + cy\n return Vector2.from_coords(u, v)", + "docstring": "Distort one or more Vector2 points using the affine transform. Args: params: Tensor representing the affine transform parameters. points: Vector2 representing the points to distort. Returns: Vector2 representing the distorted points. Example: >>> params = Tensor([1., 2., 3., 4.]) >>> points = Vector2.from_coords(1., 2.) >>> AffineTransform().distort(params, points) x: 4.0 y: 8.0", + "type": "method", + "file_path": "kornia\\kornia\\sensors\\camera\\distortion_model.py", + "ast_data": "FunctionDef name:distort arg:self arg:params arg:points arguments arg arg arg Assign Assign Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "TestEnvironment", + "source_code": "class TestEnvironment(object):\n\n def __init__(self):\n self.tf_data_service_dispatcher = None\n self.total_phsyical_gpus = None\n\n def __setattr__(self, name, value):\n if not in_main_process():\n raise ValueError('combinations.env() should only be modified in the main process. Condition your code on combinations.in_main_process().')\n super().__setattr__(name, value)", + "docstring": "Holds the test environment information. 
Tests should modify the attributes of the instance returned by in the main process if needed, and it will be passed to the worker processes each time a test case is run.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py", + "ast_data": "ClassDef name:TestEnvironment FunctionDef name:__init__ arg:self arguments arg Assign Assign FunctionDef name:__setattr__ arg:self arg:name arg:value arguments arg arg arg If Call Raise Call Call Call" + }, + { + "library": "scipy", + "name": "roots_genlaguerre", + "source_code": "def roots_genlaguerre(n, alpha, mu=False):\n m = int(n)\n if n < 1 or n != m:\n raise ValueError('n must be a positive integer.')\n if alpha < -1:\n raise ValueError('alpha must be greater than -1.')\n mu0 = _ufuncs.gamma(alpha + 1)\n if m == 1:\n x = np.array([alpha + 1.0], 'd')\n w = np.array([mu0], 'd')\n if mu:\n return (x, w, mu0)\n else:\n return (x, w)\n\n def an_func(k):\n return 2 * k + alpha + 1\n\n def bn_func(k):\n return -np.sqrt(k * (k + alpha))\n\n def f(n, x):\n return _ufuncs.eval_genlaguerre(n, alpha, x)\n\n def df(n, x):\n return (n * _ufuncs.eval_genlaguerre(n, alpha, x) - (n + alpha) * _ufuncs.eval_genlaguerre(n - 1, alpha, x)) / x\n return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)", + "docstring": "Gauss-generalized Laguerre quadrature. Compute the sample points and weights for Gauss-generalized Laguerre quadrature. The sample points are the roots of the nth degree generalized Laguerre polynomial, :math:. These sample points and weights correctly integrate polynomials of degree :math: or less over the interval :math: with weight function :math:. See 22.3.9 in [AS]_ for details. Parameters ---------- n : int quadrature order alpha : float alpha must be > -1 mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. New York: Dover, 1972.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_orthogonal.py", + "ast_data": "FunctionDef name:roots_genlaguerre arg:n arg:alpha arg:mu arguments arg arg arg Assign Call If BoolOp Compare Compare Raise Call If Compare Raise Call Assign Call If Compare Assign Call Assign Call If Return return:yes Return return:yes FunctionDef name:an_func arg:k arguments arg Return return:yes FunctionDef name:bn_func arg:k arguments arg Return return:yes Call FunctionDef name:f arg:n arg:x arguments arg arg Return return:yes Call FunctionDef name:df arg:n arg:x arguments arg arg Return return:yes Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_reshard", + "source_code": "@no_type_check\ndef _reshard(state: _FSDPState, handle: FlatParamHandle, free_unsharded_flat_param: bool):\n handle.reshard(free_unsharded_flat_param)\n if state.limit_all_gathers and free_unsharded_flat_param:\n if not torch.distributed._functional_collectives.is_torchdynamo_compiling():\n free_event = state._device_handle.Event()\n free_event.record()\n state._free_event_queue.enqueue(free_event)\n handle.post_reshard()\n handle._prefetched = False", + "docstring": "Reshards the handle. 
``free_unsharded_flat_param`` indicates whether to free the handle's padded unsharded flat parameter.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_reshard arg:state arg:handle arg:free_unsharded_flat_param arguments arg arg arg Call If BoolOp If Call Assign Call Call Call Call Assign" + }, + { + "library": "pytorch", + "name": "fwd_only", + "source_code": "@torch.no_grad()\ndef fwd_only(fn: Callable[..., Any], args: Sequence[Any], *, run_functional_passes: bool=True, get_decomp_fn: Optional[Callable[..., Any]]=None) -> torch.fx.GraphModule:\n with enable_python_dispatcher():\n decompositions = get_decomp_fn() if get_decomp_fn is not None else select_decomp_table()\n gm = make_fx(fn, decompositions, tracing_mode='real')(*args)\n from .fx_passes.post_grad import remove_noop_ops\n if run_functional_passes:\n remove_noop_ops(gm.graph)\n gm.graph.eliminate_dead_code()\n gm.recompile()\n return gm", + "docstring": "Build a normalized inference graph, for use with fx_to_pattern", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py", + "ast_data": "FunctionDef name:fwd_only arg:fn arg:args arguments arg arg arg arg With Call Assign Compare Call Call Assign Call Call If Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "static_lengths", + "source_code": "def static_lengths(self, ragged_lengths=True):\n if self.num_row_partitions == 0:\n return self._static_inner_shape_as_list(False)\n first_dim = self.row_partitions[0].static_nrows\n if isinstance(first_dim, tensor_shape.Dimension):\n first_dim = first_dim.value\n rp_dims = [first_dim]\n for rp in self.row_partitions:\n if rp.is_uniform():\n rp_dims.append(rp.static_uniform_row_length)\n elif ragged_lengths:\n const_vals = tensor_util.constant_value(rp.row_lengths())\n if const_vals is None:\n rp_dims.append(None)\n else:\n rp_dims.append(tuple(const_vals.tolist()))\n else:\n rp_dims.append(None)\n return rp_dims + self._static_inner_shape_as_list(True)", + "docstring": "Returns a list of statically known axis lengths. This represents what values are known. For each row partition, it presents either the uniform row length (if statically known), the list of row lengths, or none if it is not statically known. For the inner shape, if the rank is known, then each dimension is reported if known, and None otherwise. If the rank of the inner shape is not known, then the returned list ends with an ellipsis. Args: ragged_lengths: If false, returns None for all ragged dimensions. 
Returns: A Sequence[Union[Sequence[int],int, None]] of lengths, with a possible Ellipsis at the end.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:static_lengths arg:self arg:ragged_lengths arguments arg arg If Compare Return return:yes Call Assign If Call Assign Assign For If Call Call If Assign Call Call If Compare Call Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "isnamedtuple", + "source_code": "def isnamedtuple(f):\n if not (tf_inspect.isclass(f) and issubclass(f, tuple)):\n return False\n if not hasattr(f, '_fields'):\n return False\n fields = getattr(f, '_fields')\n if not isinstance(fields, tuple):\n return False\n if not all((isinstance(f, str) for f in fields)):\n return False\n return True", + "docstring": "Returns True if the argument is a namedtuple-like.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\inspect_utils.py", + "ast_data": "FunctionDef name:isnamedtuple arg:f arguments arg If BoolOp Call Call Return return:yes If Call Return return:yes Assign Call If Call Return return:yes If Call Call Return return:yes Return return:yes" + }, + { + "library": "sphinx", + "name": "build_navpoints", + "source_code": "def build_navpoints(self, nodes: list[dict[str, Any]]) -> list[NavPoint]:\n navstack: list[NavPoint] = [NavPoint('dummy', 0, '', '', [])]\n level = 0\n lastnode = None\n for node in nodes:\n if not node['text']:\n continue\n file = node['refuri'].split('#')[0]\n if file in self.ignored_files:\n continue\n if node['level'] > self.config.epub_tocdepth:\n continue\n if node['level'] == level:\n navpoint = self.new_navpoint(node, level)\n navstack.pop()\n navstack[-1].children.append(navpoint)\n navstack.append(navpoint)\n elif node['level'] == level + 1:\n level += 1\n if lastnode and self.config.epub_tocdup:\n navstack[-1].children.append(self.new_navpoint(lastnode, level, False))\n navpoint = self.new_navpoint(node, level)\n navstack[-1].children.append(navpoint)\n navstack.append(navpoint)\n elif node['level'] < level:\n while node['level'] < len(navstack):\n navstack.pop()\n level = node['level']\n navpoint = self.new_navpoint(node, level)\n navstack[-1].children.append(navpoint)\n navstack.append(navpoint)\n else:\n msg = __('node has an invalid level')\n raise ValueError(msg)\n lastnode = node\n return navstack[0].children", + "docstring": "Create the toc navigation structure. Subelements of a node are nested inside the navpoint. For nested nodes the parent node is reinserted in the subnav.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\_epub_base.py", + "ast_data": "FunctionDef name:build_navpoints arg:self arg:nodes arguments arg arg Call Assign Assign For If Assign Call If Compare If Compare If Compare Assign Call Call Call Call If Compare If BoolOp Call Call Assign Call Call Call If Compare While Compare Call Call Assign Assign Call Call Call Assign Call Raise Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "cumprod", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef cumprod(x, axis=0):\n return math_ops.cumprod(x, axis=axis)", + "docstring": "Cumulative product of the values in a tensor, alongside the specified axis. Args: x: A tensor or variable. axis: An integer, the axis to compute the product. 
Returns: A tensor of the cumulative product of values of along .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:cumprod arg:x arg:axis arguments arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_sort_levels_monotonic", + "source_code": "def _sort_levels_monotonic(self) -> Self:\n return self", + "docstring": "Compat with MultiIndex.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_sort_levels_monotonic arg:self arguments arg Return return:yes" + }, + { + "library": "cherrypy", + "name": "SvcOther", + "source_code": "def SvcOther(self, control):\n from cherrypy import process\n process.bus.publish(control_codes.key_for(control))", + "docstring": "Send a command to the service.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\win32.py", + "ast_data": "FunctionDef name:SvcOther arg:self arg:control arguments arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "_reduce", + "source_code": "def _reduce(v):\n if reduction == 'concat' and _collective_all_reduce_multi_worker(strategy):\n return _multi_worker_concat(v, strategy)\n if not _is_per_replica_instance(v):\n return v\n elif reduction == 'first':\n return strategy.unwrap(v)[0]\n elif reduction == 'concat':\n if _is_tpu_multi_host(strategy):\n return _tpu_multi_host_concat(v, strategy)\n else:\n return concat(strategy.unwrap(v))\n else:\n raise ValueError('`reduction` must be \"first\" or \"concat\".')", + "docstring": "Reduce a single object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:_reduce arg:v arguments arg If BoolOp Compare Call Return return:yes Call If Call Return return:yes If Compare Return return:yes Call If Compare If Call Return return:yes Call Return return:yes Call Call Raise Call" + }, + { + "library": "sphinx", + "name": "check_uri", + "source_code": "def check_uri(self, refnode: nodes.reference) -> None:\n if 'internal' in refnode or 'refuri' not in refnode:\n return\n uri = refnode['refuri']\n title = refnode.astext()\n for alias, (base_uri, _caption) in self.app.config.extlinks.items():\n uri_pattern = re.compile(re.escape(base_uri).replace('%s', '(?P.+)'))\n match = uri_pattern.match(uri)\n if match and match.groupdict().get('value') and ('/' not in match.groupdict()['value']):\n msg = __('hardcoded link %r could be replaced by an extlink (try using %r instead)')\n value = match.groupdict().get('value')\n if uri != title:\n replacement = f':{alias}:`{rst.escape(title)} <{value}>`'\n else:\n replacement = f':{alias}:`{value}`'\n logger.warning(msg, uri, replacement, location=refnode)", + "docstring": "If the URI in ``, emit a warning with a replacement suggestion.", + "type": "method", + "file_path": "sphinx\\sphinx\\ext\\extlinks.py", + "ast_data": "FunctionDef name:check_uri arg:self arg:refnode arguments arg arg If BoolOp Compare Compare Return return:no Assign Assign Call For Call Assign Call Call Call Assign Call If BoolOp Call Call Compare Call Assign Call Assign Call Call If Compare Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "_GroupByDevices", + "source_code": "def _GroupByDevices(self, saveables):\n per_device = collections.defaultdict(lambda: [])\n for saveable in saveables:\n canonical_device = set((pydev.canonical_name(spec.device) for spec in saveable.specs))\n if len(canonical_device) != 1:\n raise 
ValueError('All tensors of a saveable object must be on the same device: %s' % saveable.name)\n per_device[canonical_device.pop()].append(saveable)\n return sorted(per_device.items(), key=lambda t: t[0])", + "docstring": "Group Variable tensor slices per device. TODO(touts): Make sure that all the devices found are on different job/replica/task/cpu|gpu. It would be bad if 2 were on the same device. It can happen if the devices are unspecified. Args: saveables: A list of BaseSaverBuilder.SaveableObject objects. Returns: A list of tuples: (device_name, BaseSaverBuilder.SaveableObject) tuples. The list is sorted by ascending device_name. Raises: ValueError: If the tensors of a saveable are on different devices.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", + "ast_data": "FunctionDef name:_GroupByDevices arg:self arg:saveables arguments arg arg Assign Call arguments For Assign Call Call If Compare Call Raise Call Call Call Return return:yes Call Call arguments arg" + }, + { + "library": "scikit-learn", + "name": "get_feature_names_out", + "source_code": "def get_feature_names_out(self, input_features=None):\n check_is_fitted(self, 'f_')\n class_name = self.__class__.__name__.lower()\n return np.asarray([f'{class_name}0'], dtype=object)", + "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Ignored. Returns ------- feature_names_out : ndarray of str objects An ndarray with one string i.e. [\"isotonicregression0\"].", + "type": "method", + "file_path": "scikit-learn\\sklearn\\isotonic.py", + "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "before_nearest_workday", + "source_code": "def before_nearest_workday(dt: datetime) -> datetime:\n return previous_workday(nearest_workday(dt))", + "docstring": "returns previous workday before nearest workday", + "type": "function", + "file_path": "pandas\\pandas\\tseries\\holiday.py", + "ast_data": "FunctionDef name:before_nearest_workday arg:dt arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "uniform_row_length", + "source_code": "def uniform_row_length(self):\n return self._uniform_row_length", + "docstring": "Returns the length of each row in this partition, if rows are uniform. If all rows in this have the same length, then this returns that length as a scalar integer . Otherwise, it returns . 
Returns: scalar Tensor with `self.dtype`, or `None`.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:uniform_row_length arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_plot_commands", + "source_code": "@_api.deprecated('3.7', pending=True)\ndef get_plot_commands() -> list[str]:\n NON_PLOT_COMMANDS = {'connect', 'disconnect', 'get_current_fig_manager', 'ginput', 'new_figure_manager', 'waitforbuttonpress'}\n return [name for name in _get_pyplot_commands() if name not in NON_PLOT_COMMANDS]", + "docstring": "Get a sorted list of all of the plotting commands.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:get_plot_commands arguments Assign Return return:yes Call Compare Call" + }, + { + "library": "tensorflow", + "name": "_get_tf2_flags", + "source_code": "def _get_tf2_flags(parser):\n input_file_group = parser.add_mutually_exclusive_group()\n input_file_group.add_argument('--saved_model_dir', type=str, help='Full path of the directory containing the SavedModel.')\n input_file_group.add_argument('--keras_model_file', type=str, help='Full filepath of HDF5 file containing tf.Keras model.')\n parser.add_argument('--saved_model_tag_set', type=str, help='Comma-separated set of tags identifying the MetaGraphDef within the SavedModel to analyze. All tags must be present. In order to pass in an empty tag set, pass in \"\". (default \"serve\")')\n parser.add_argument('--saved_model_signature_key', type=str, help='Key identifying the SignatureDef containing inputs and outputs. (default DEFAULT_SERVING_SIGNATURE_DEF_KEY)')\n parser.add_argument('--enable_v1_converter', action='store_true', help='Enables the TensorFlow V1 converter in 2.0')", + "docstring": "Returns ArgumentParser for tflite_convert for TensorFlow 2.0. 
Args: parser: ArgumentParser", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_convert.py", + "ast_data": "FunctionDef name:_get_tf2_flags arg:parser arguments arg Assign Call Call Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.Substitution(_quiver_doc)\ndef __init__(self, ax, *args, scale=None, headwidth=3, headlength=5, headaxislength=4.5, minshaft=1, minlength=1, units='width', scale_units=None, angles='uv', width=None, color='k', pivot='tail', **kwargs):\n self._axes = ax\n X, Y, U, V, C = _parse_args(*args, caller_name='quiver')\n self.X = X\n self.Y = Y\n self.XY = np.column_stack((X, Y))\n self.N = len(X)\n self.scale = scale\n self.headwidth = headwidth\n self.headlength = float(headlength)\n self.headaxislength = headaxislength\n self.minshaft = minshaft\n self.minlength = minlength\n self.units = units\n self.scale_units = scale_units\n self.angles = angles\n self.width = width\n if pivot.lower() == 'mid':\n pivot = 'middle'\n self.pivot = pivot.lower()\n _api.check_in_list(self._PIVOT_VALS, pivot=self.pivot)\n self.transform = kwargs.pop('transform', ax.transData)\n kwargs.setdefault('facecolors', color)\n kwargs.setdefault('linewidths', (0,))\n super().__init__([], offsets=self.XY, offset_transform=self.transform, closed=False, **kwargs)\n self.polykw = kwargs\n self.set_UVC(U, V, C)\n self._dpi_at_last_init = None", + "docstring": "The constructor takes one required argument, an Axes instance, followed by the args and kwargs described by the following pyplot interface documentation: %s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\quiver.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:ax arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Assign Call Assign Assign Assign Call Assign Call Assign Assign Assign Call Assign Assign Assign Assign Assign Assign Assign If Compare Call Assign Assign Call Call Assign Call Call Call Call Call Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "in_top_k", + "source_code": "@tf_export(v1=['math.in_top_k', 'nn.in_top_k'])\n@dispatch.add_dispatch_support\ndef in_top_k(predictions, targets, k, name=None):\n with ops.name_scope(name, 'in_top_k'):\n return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)", + "docstring": "Says whether the targets are in the top predictions. This outputs a bool array, an entry is if the prediction for the target class is finite (not inf, -inf, or nan) and among the top predictions among all predictions for example . Note that the behavior of differs from the op in its handling of ties; if multiple classes have the same prediction value and straddle the top- boundary, all of those classes are considered to be in the top . More formally, let \\\\(predictions_i\\\\) be the predictions for all classes for example , \\\\(targets_i\\\\) be the target class for example , \\\\(out_i\\\\) be the output for example , $$out_i = predictions_{i, targets_i} \\in TopKIncludingTies(predictions_i)$$ Args: predictions: A of type . A x tensor. targets: A . Must be one of the following types: , . A vector of class ids. k: An . Number of top elements to look at for computing precision. name: A name for the operation (optional). Returns: A of type . 
Computed Precision at `k` as a `bool Tensor`.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:in_top_k arg:predictions arg:targets arg:k arg:name arguments arg arg arg arg With Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "clear_preprocessing", + "source_code": "def clear_preprocessing(self):\n pass", + "docstring": "Restore this APIChangeSpec to before it preprocessed a file. This is needed if preprocessing a file changed any rewriting rules.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py", + "ast_data": "FunctionDef name:clear_preprocessing arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "scatter_min", + "source_code": "def scatter_min(self, sparse_delta, use_locking=False, name=None):\n if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n return gen_state_ops.scatter_min(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)", + "docstring": "Updates this variable with the min of `tf.IndexedSlices` and itself. Args: sparse_delta: `tf.IndexedSlices` to use as an argument of min with this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered minimization has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", + "ast_data": "FunctionDef name:scatter_min arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call" + }, + { + "library": "django", + "name": "BoundWidget", + "source_code": "@html_safe\nclass BoundWidget:\n\n def __init__(self, parent_widget, data, renderer):\n self.parent_widget = parent_widget\n self.data = data\n self.renderer = renderer\n\n def __str__(self):\n return self.tag(wrap_label=True)\n\n def tag(self, wrap_label=False):\n context = {'widget': {**self.data, 'wrap_label': wrap_label}}\n return self.parent_widget._render(self.template_name, context, self.renderer)\n\n @property\n def template_name(self):\n if 'template_name' in self.data:\n return self.data['template_name']\n return self.parent_widget.template_name\n\n @property\n def id_for_label(self):\n return self.data['attrs'].get('id')\n\n @property\n def choice_label(self):\n return self.data['label']", + "docstring": "A container class used for iterating over widgets. This is useful for widgets that have choices. 
For example, the following can be used in a template: {% for radio in myform.beatles %} {{ radio.choice_label }} {{ radio.tag }} {% endfor %}", + "type": "class", + "file_path": "django\\django\\forms\\boundfield.py", + "ast_data": "ClassDef name:BoundWidget FunctionDef name:__init__ arg:self arg:parent_widget arg:data arg:renderer arguments arg arg arg arg Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call FunctionDef name:tag arg:self arg:wrap_label arguments arg arg Assign Return return:yes Call FunctionDef name:template_name arg:self arguments arg If Compare Return return:yes Return return:yes FunctionDef name:id_for_label arg:self arguments arg Return return:yes Call FunctionDef name:choice_label arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_validate_fs", + "source_code": "def _validate_fs(fs, allow_none=True):\n if fs is None:\n if not allow_none:\n raise ValueError('Sampling frequency can not be none.')\n else:\n if not np.isscalar(fs):\n raise ValueError('Sampling frequency fs must be a single scalar.')\n fs = float(fs)\n return fs", + "docstring": "Check if the given sampling frequency is a scalar and raises an exception otherwise. If allow_none is False, also raises an exception for none sampling rates. Returns the sampling frequency as float or none if the input is none.", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_arraytools.py", + "ast_data": "FunctionDef name:_validate_fs arg:fs arg:allow_none arguments arg arg If Compare If Raise Call If Call Raise Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, graph_def, input_tensors, output_tensors, input_arrays_with_shape=None, output_arrays=None, experimental_debug_info_func=None):\n super(TFLiteConverter, self).__init__(graph_def, input_tensors, output_tensors, input_arrays_with_shape, output_arrays, experimental_debug_info_func)", + "docstring": "Constructor for TFLiteConverter. Args: graph_def: Frozen TensorFlow GraphDef. input_tensors: List of input tensors. Type and shape are computed using and . output_tensors: List of output tensors (only .name is used from this). input_arrays_with_shape: Tuple of strings representing input tensor names and list of integers representing input shapes (e.g., [(\"foo\" : [1, 16, 16, 3])]). Use only when graph cannot be loaded into TensorFlow and when and are None. (default None) output_arrays: List of output tensors to freeze graph with. Use only when graph cannot be loaded into TensorFlow and when and are None. (default None) experimental_debug_info_func: An experimental function to retrieve the graph debug info for a set of nodes from the . 
Raises: ValueError: Invalid arguments.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:graph_def arg:input_tensors arg:output_tensors arg:input_arrays_with_shape arg:output_arrays arg:experimental_debug_info_func arguments arg arg arg arg arg arg arg Call Call" + }, + { + "library": "scikit-learn", + "name": "BaseEnsemble", + "source_code": "class BaseEnsemble(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):\n\n @abstractmethod\n def __init__(self, estimator=None, *, n_estimators=10, estimator_params=tuple()):\n self.estimator = estimator\n self.n_estimators = n_estimators\n self.estimator_params = estimator_params\n\n def _validate_estimator(self, default=None):\n if self.estimator is not None:\n self.estimator_ = self.estimator\n else:\n self.estimator_ = default\n\n def _make_estimator(self, append=True, random_state=None):\n estimator = clone(self.estimator_)\n estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})\n if random_state is not None:\n _set_random_states(estimator, random_state)\n if append:\n self.estimators_.append(estimator)\n return estimator\n\n def __len__(self):\n return len(self.estimators_)\n\n def __getitem__(self, index):\n return self.estimators_[index]\n\n def __iter__(self):\n return iter(self.estimators_)", + "docstring": "Base class for all ensemble classes. Warning: This class should not be used directly. Use derived classes instead. Parameters ---------- estimator : object The base estimator from which the ensemble is built. n_estimators : int, default=10 The number of estimators in the ensemble. estimator_params : list of str, default=tuple() The list of attributes to use as parameters when instantiating a new base estimator. If none are given, default parameters are used. Attributes ---------- estimator_ : estimator The base estimator from which the ensemble is grown. estimators_ : list of estimators The collection of fitted base estimators.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py", + "ast_data": "ClassDef name:BaseEnsemble FunctionDef name:__init__ arg:self arg:estimator arguments arg arg arg arg Call Assign Assign Assign FunctionDef name:_validate_estimator arg:self arg:default arguments arg arg If Compare Assign Assign FunctionDef name:_make_estimator arg:self arg:append arg:random_state arguments arg arg arg Assign Call Call Call If Compare Call If Call Return return:yes FunctionDef name:__len__ arg:self arguments arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:index arguments arg arg Return return:yes FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None):\n self._more_validate_params()\n alpha = self.nu / 2\n self._fit(X, alpha=alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, offset_init=offset_init, sample_weight=sample_weight)\n return self", + "docstring": "Fit linear One-Class SVM with Stochastic Gradient Descent. This solves an equivalent optimization problem of the One-Class SVM primal optimization problem and returns a weight vector w and an offset rho such that the decision function is given by - rho. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. 
y : Ignored Not used, present for API consistency by convention. coef_init : array, shape (n_classes, n_features) The initial coefficients to warm-start the optimization. offset_init : array, shape (n_classes,) The initial offset to warm-start the optimization. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples. If not provided, uniform weights are assumed. These weights will be multiplied with class_weight (passed through the constructor) if class_weight is specified. Returns ------- self : object Returns a fitted instance of self.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:coef_init arg:offset_init arg:sample_weight arguments arg arg arg arg arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_shape", + "source_code": "def get_shape(self) -> tensor_shape.TensorShape:\n return self.shape", + "docstring": "Alias of Tensor.shape.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:get_shape arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_moment_central", + "source_code": "def _moment_central(self, order=1, *, method=None):\n methods = self._moment_methods if method is None else {method}\n return self._moment_central_dispatch(order, methods=methods, **self._parameters)", + "docstring": "Distribution moment about the mean.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py", + "ast_data": "FunctionDef name:_moment_central arg:self arg:order arguments arg arg arg Assign Compare Return return:yes Call" + }, + { + "library": "pandas", + "name": "memory_usage_bytes", + "source_code": "@property\ndef memory_usage_bytes(self) -> int:\n deep = self.memory_usage == 'deep'\n return self.data.memory_usage(index=True, deep=deep)", + "docstring": "Memory usage in bytes. Returns ------- memory_usage_bytes : int Object's total memory usage in bytes.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:memory_usage_bytes arg:self arguments arg Assign Compare Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_initialize_save_and_restore_functions", + "source_code": "def _initialize_save_and_restore_functions(self):\n checkpoint_factory_map, registered_savers = save_util_v1.get_checkpoint_factories_and_keys(self.object_names)\n self._obj_to_registered_saver = object_identity.ObjectIdentityDictionary()\n for saver_name, trackables in registered_savers.items():\n for trackable in trackables.values():\n self._obj_to_registered_saver[trackable] = saver_name\n self._saveable_objects_map = _gen_save_and_restore_functions(checkpoint_factory_map)", + "docstring": "Generates all checkpoint save/restore functions. The save and restore functions are generated in the eager context (or in the user's Graph/Session) before being copied to the exported GraphDef. These functions record the ops for saving/restoring the entire object or individual objects (e.g. variables and hash tables). The global save and restore functions are generated for compatibility with TF1 and loading from C++, and is saved in the . The individual functions are generated for the Python TF2 use case, where users use the loaded SavedModel as-is, or compose new models using parts of the object loaded from the SavedModel. 
These functions are recorded in the map in the proto.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py", + "ast_data": "FunctionDef name:_initialize_save_and_restore_functions arg:self arguments arg Assign Call Assign Call For Call For Call Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "_make_tensor_into_per_replica", + "source_code": "def _make_tensor_into_per_replica(input_tensor):\n if isinstance(input_tensor, value_lib.DistributedValues):\n return input_tensor\n if not tensor_util.is_tensor(input_tensor):\n input_tensor = ops.convert_to_tensor(input_tensor)\n if hasattr(input_tensor, 'device'):\n return value_lib.PerReplica((input_tensor,))\n raise ValueError(\"Cannot convert `input_tensor` to a `PerReplica` object because it doesn't have device set.\")", + "docstring": "Converts a single tensor into a PerReplica object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py", + "ast_data": "FunctionDef name:_make_tensor_into_per_replica arg:input_tensor arguments arg If Call Return return:yes If Call Assign Call If Call Return return:yes Call Raise Call" + }, + { + "library": "pytorch", + "name": "bsr_scatter_mm_indices_data", + "source_code": "def bsr_scatter_mm_indices_data(bsr, other, indices_format='bsr_strided_mm_compressed', **meta_input):\n assert bsr.dense_dim() == 0\n assert bsr.ndim == 2\n blocksize = bsr.values().shape[-2:]\n M, K = bsr.shape\n Ms, Ks = blocksize\n K_, N = other.shape[-2:]\n assert K_ == K\n nbatches = other.shape[:-2].numel()\n meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)\n if 'allow_tf32' not in meta_input:\n meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})\n SPLIT_N = meta['SPLIT_N']\n indices_data = _bsr_scatter_mm_indices_data(indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr))\n if indices_format == 'bsr_strided_mm_compressed':\n meta.update(is_compressed=True)\n return indices_data + (meta,)\n elif indices_format == 'bsr_strided_mm':\n meta.update(is_compressed=False)\n return indices_data + (meta,)\n else:\n return indices_data", + "docstring": "Computes indices data for :func: used in BSR and strided tensor matrix multiplication.", + "type": "function", + "file_path": "pytorch\\torch\\sparse\\_triton_ops.py", + "ast_data": "FunctionDef name:bsr_scatter_mm_indices_data arg:bsr arg:other arg:indices_format arguments arg arg arg arg Compare Call Compare Assign Call Assign Assign Assign Compare Assign Call Assign Call If Compare Call Compare Assign Assign Call Call If Compare Call Return return:yes If Compare Call Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "non_reducing_slice", + "source_code": "def non_reducing_slice(slice_: Subset):\n kinds = (ABCSeries, np.ndarray, Index, list, str)\n if isinstance(slice_, kinds):\n slice_ = IndexSlice[:, slice_]\n\n def pred(part) -> bool:\n if isinstance(part, tuple):\n return any((isinstance(s, slice) or is_list_like(s) for s in part))\n else:\n return isinstance(part, slice) or is_list_like(part)\n if not is_list_like(slice_):\n if not isinstance(slice_, slice):\n slice_ = [[slice_]]\n else:\n slice_ = [slice_]\n else:\n slice_ = [p if pred(p) else [p] for p in slice_]\n return tuple(slice_)", + "docstring": "Ensure that a slice doesn't reduce to a Series or Scalar. 
Any user-passed should have this called on it to make sure we're always working with DataFrames.", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\style_render.py", + "ast_data": "FunctionDef name:non_reducing_slice arg:slice_ arguments arg Assign If Call Assign FunctionDef name:pred arg:part arguments arg If Call Return return:yes Call BoolOp Call Call Return return:yes BoolOp Call Call If Call If Call Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "clear", + "source_code": "def clear(self):\n raise NotImplementedError('subclasses of BaseCache must provide a clear() method')", + "docstring": "Remove *all* values from the cache at once.", + "type": "method", + "file_path": "django\\django\\core\\cache\\backends\\base.py", + "ast_data": "FunctionDef name:clear arg:self arguments arg Raise Call" + }, + { + "library": "django", + "name": "savepoint_commit_sql", + "source_code": "def savepoint_commit_sql(self, sid):\n return 'RELEASE SAVEPOINT %s' % self.quote_name(sid)", + "docstring": "Return the SQL for committing the given savepoint.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:savepoint_commit_sql arg:self arg:sid arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "Mishra10", + "source_code": "class Mishra10(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n self.global_optimum = [[2.0, 2.0]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n x1, x2 = (int(x[0]), int(x[1]))\n f1 = x1 + x2\n f2 = x1 * x2\n return (f1 - f2) ** 2.0", + "docstring": "Mishra 10 objective function. This class defines the Mishra 10 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: TODO - int(x) should be used instead of floor(x)!!!!! f_{\\text{Mishra10}}({x}) = \\left[ \\lfloor x_1 \\perp x_2 \\rfloor - \\lfloor x_1 \\rfloor - \\lfloor x_2 \\rfloor \\right]^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005 TODO line 1115", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py", + "ast_data": "ClassDef name:Mishra10 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Assign Assign Return return:yes" + }, + { + "library": "scipy", + "name": "entropy", + "source_code": "def entropy(self, loc=None, shape=1, df=1):\n dim, loc, shape, df = self._process_parameters(None, shape, df)\n return self._entropy(dim, df, shape)", + "docstring": "Calculate the differential entropy of a multivariate t-distribution. 
Parameters ---------- %(_mvt_doc_default_callparams)s Returns ------- h : float Differential entropy", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:entropy arg:self arg:loc arg:shape arg:df arguments arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "TensorContainer", + "source_code": "class TensorContainer:\n pass", + "docstring": "Container for tensors as attributes", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\debug_utils.py", + "ast_data": "ClassDef name:TensorContainer" + }, + { + "library": "pandas", + "name": "_constructor", + "source_code": "@cache_readonly\ndef _constructor(self) -> type[Index]:\n return Index", + "docstring": "return the class to use for construction", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\range.py", + "ast_data": "FunctionDef name:_constructor arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "weight_is_quantized", + "source_code": "def weight_is_quantized(qconfig):\n return weight_dtype(qconfig) in [torch.quint8, torch.qint8, torch.float16, torch.quint4x2, torch.uint8, torch.int8, torch.int16, torch.int32, torch.float8_e5m2, torch.float8_e4m3fn]", + "docstring": "Given a qconfig, decide if the weight needs to be quantized or not", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\utils.py", + "ast_data": "FunctionDef name:weight_is_quantized arg:qconfig arguments arg Return return:yes Compare Call" + }, + { + "library": "django", + "name": "ForwardOneToOneDescriptor", + "source_code": "class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor):\n\n def get_object(self, instance):\n if self.field.remote_field.parent_link:\n deferred = instance.get_deferred_fields()\n rel_model = self.field.remote_field.model\n fields = [field.attname for field in rel_model._meta.concrete_fields]\n if not any((field in fields for field in deferred)):\n kwargs = {field: getattr(instance, field) for field in fields}\n obj = rel_model(**kwargs)\n obj._state.adding = instance._state.adding\n obj._state.db = instance._state.db\n return obj\n return super().get_object(instance)\n\n def __set__(self, instance, value):\n super().__set__(instance, value)\n if self.field.primary_key and self.field.remote_field.parent_link:\n opts = instance._meta\n inherited_pk_fields = [field for field in opts.concrete_fields if field.primary_key and field.remote_field]\n for field in inherited_pk_fields:\n rel_model_pk_name = field.remote_field.model._meta.pk.attname\n raw_value = getattr(value, rel_model_pk_name) if value is not None else None\n setattr(instance, rel_model_pk_name, raw_value)", + "docstring": "Accessor to the related object on the forward side of a one-to-one relation. 
In the example:: class Restaurant(Model): place = OneToOneField(Place, related_name='restaurant') `` instance.", + "type": "class", + "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py", + "ast_data": "ClassDef name:ForwardOneToOneDescriptor FunctionDef name:get_object arg:self arg:instance arguments arg arg If Assign Call Assign Assign If Call Compare Assign Call Assign Call Assign Assign Return return:yes Return return:yes Call Call FunctionDef name:__set__ arg:self arg:instance arg:value arguments arg arg arg Call Call If BoolOp Assign Assign BoolOp For Assign Assign Compare Call Call" + }, + { + "library": "tensorflow", + "name": "_tf_data_packed_nest_with_indices", + "source_code": "def _tf_data_packed_nest_with_indices(structure, flat, index):\n packed = []\n for s in _tf_data_yield_value(structure):\n if _tf_data_is_nested(s):\n new_index, child = _tf_data_packed_nest_with_indices(s, flat, index)\n packed.append(sequence_like(s, child))\n index = new_index\n else:\n packed.append(flat[index])\n index += 1\n return (index, packed)", + "docstring": "Helper function for pack_nest_as. Args: structure: Substructure (tuple of elements and/or tuples) to mimic flat: Flattened values to output substructure for. index: Index at which to start reading from flat. Returns: The tuple (new_index, child), where: * new_index - the updated index into having processed . * packed - the subset of corresponding to , having started at , and packed into the same nested format. Raises: ValueError: if contains more elements than (assuming indexing starts from ).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py", + "ast_data": "FunctionDef name:_tf_data_packed_nest_with_indices arg:structure arg:flat arg:index arguments arg arg arg Assign For Call If Call Assign Call Call Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_attention_scale", + "source_code": "def _attention_scale(g: jit_utils.GraphContext, query: torch._C.Value) -> torch._C.Value:\n query_shape = g.op('Shape', query)\n query_shape_last = g.op('Slice', query_shape, g.op('Constant', value_t=torch.tensor([-1], dtype=torch.int64)), g.op('Constant', value_t=torch.tensor([_constants.INT64_MAX], dtype=torch.int64)))\n embedding_size = g.op('Cast', query_shape_last, to_i=_type_utils.JitScalarType.from_value(query).onnx_type())\n const_one = g.op('Constant', value_t=torch.tensor([1.0], dtype=torch.float))\n scale = g.op('Div', const_one, g.op('Sqrt', embedding_size))\n scale = g.op('Cast', scale, to_i=_type_utils.JitScalarType.from_value(query).onnx_type())\n return scale", + "docstring": "Calculate the scale factor for the attention result. Args: query: Tensor of shape [..., L, E] Returns: Scalar scale factor := 1 / math.sqrt(query.size(-1))", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\symbolic_opset14.py", + "ast_data": "FunctionDef name:_attention_scale arg:g arg:query arguments arg arg Assign Call Assign Call Call Call Call Call Assign Call Call Call Assign Call Call Assign Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "check_for_type_equality", + "source_code": "def check_for_type_equality(g1, g2):\n for n, m in zip(g1.nodes, g2.nodes):\n if n.type != m.type:\n return False\n return True", + "docstring": "A check equality to be used in fixed points. 
We do not use graph equality but instead type equality.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\unify_refinements.py", + "ast_data": "FunctionDef name:check_for_type_equality arg:g1 arg:g2 arguments arg arg For Call If Compare Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "_merge_outputs", + "source_code": "def _merge_outputs(self, output_chunks: list[Any]) -> Any:\n return merge_chunks(output_chunks, self._output_merge_spec)", + "docstring": "Merge output chunks back to a batch state. If output_merge_spec is None, the utility will merge output chunks by dimension 0 (batch dim).", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py", + "ast_data": "FunctionDef name:_merge_outputs arg:self arg:output_chunks arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "EmbeddingFeature", + "source_code": "class EmbeddingFeature(enum.Enum):\n UNSUPPORTED = 'UNSUPPORTED'\n V1 = 'V1'\n V2 = 'V2'", + "docstring": "Embedding feature flag strings. UNSUPPORTED: No embedding lookup accelerator available on the tpu. V1: Embedding lookup accelerator V1. The embedding lookup operation can only be placed at the beginning of computation. Only one instance of embedding lookup layer is allowed. V2: Embedding lookup accelerator V2. The embedding lookup operation can be placed anywhere of the computation. Multiple instances of embedding lookup layer is allowed.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_hardware_feature.py", + "ast_data": "ClassDef name:EmbeddingFeature Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "mod", + "source_code": "def mod(self, x0: T, x1: T) -> T:\n raise NotImplementedError", + "docstring": "C-style modulus, take sign from LHS (x0).", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", + "ast_data": "FunctionDef name:mod arg:self arg:x0 arg:x1 arguments arg arg arg Raise" + }, + { + "library": "pytorch", + "name": "flatten_args_detach", + "source_code": "def flatten_args_detach(args):\n flat_detached_args = []\n\n def extract_tensor_args(a):\n nonlocal flat_detached_args\n if isinstance(a, torch.Tensor):\n val = a.detach().requires_grad_(a.requires_grad)\n flat_detached_args.append(val)\n return val\n else:\n flat_detached_args.append(a)\n return a\n new_args = fx.node.map_aggregate(args, extract_tensor_args)\n return (new_args, flat_detached_args)", + "docstring": "Flatten the args into a list form and detach the tensors from computational graph.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\pipelining\\_utils.py", + "ast_data": "FunctionDef name:flatten_args_detach arg:args arguments arg Assign FunctionDef name:extract_tensor_args arg:a arguments arg If Call Assign Call Call Call Return return:yes Call Return return:yes Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "lu_matrix_inverse", + "source_code": "@tf_export('linalg.lu_matrix_inverse')\n@dispatch.add_dispatch_support\ndef lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None):\n with ops.name_scope(name or 'lu_matrix_inverse'):\n lower_upper = ops.convert_to_tensor(lower_upper, dtype_hint=dtypes.float32, name='lower_upper')\n perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')\n assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)\n if assertions:\n with ops.control_dependencies(assertions):\n lower_upper 
= array_ops.identity(lower_upper)\n perm = array_ops.identity(perm)\n shape = array_ops.shape(lower_upper)\n return lu_solve(lower_upper, perm, rhs=eye(shape[-1], batch_shape=shape[:-2], dtype=lower_upper.dtype), validate_args=False)", + "docstring": "Computes the inverse given the LU decomposition(s) of one or more matrices. This op is conceptually identical to, Note: this function does not verify the implied matrix is actually invertible nor is this condition checked even when . Args: lower_upper: as returned by , i.e., if then . perm: as returned by , i.e., if then . validate_args: Python indicating whether arguments should be checked for correctness. Note: this function does not verify the implied matrix is actually invertible, even when . Default value: (i.e., don't validate arguments). name: Python name given to ops managed by this object. Default value: (i.e., 'lu_matrix_inverse'). Returns: inv_x: The matrix_inv, i.e., . #### Examples", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linalg_impl.py", + "ast_data": "FunctionDef name:lu_matrix_inverse arg:lower_upper arg:perm arg:validate_args arg:name arguments arg arg arg arg With Call BoolOp Assign Call Assign Call Assign Call If With Call Assign Call Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "check_layout", + "source_code": "@tf_export('experimental.dtensor.check_layout', v1=[])\ndef check_layout(tensor: tensor_lib.Tensor, layout: layout_lib.Layout) -> None:\n if fetch_layout(tensor) != layout:\n raise ValueError('Layout of tensor: ' + str(fetch_layout(tensor)) + ', did not match expected layout: ' + str(layout))", + "docstring": "Asserts that the layout of the DTensor is . Args: tensor: A DTensor whose layout is to be checked. layout: The to compare against. Raises: ValueError: If the layout of does not match the supplied .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py", + "ast_data": "FunctionDef name:check_layout arg:tensor arg:layout arguments arg arg If Compare Call Raise Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "res_binop", + "source_code": "def res_binop(self, ns, types_ns, node, left, right):\n raise NotImplementedError('subclasses must implement')", + "docstring": "Resolves the return type of a binary operation.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py", + "ast_data": "FunctionDef name:res_binop arg:self arg:ns arg:types_ns arg:node arg:left arg:right arguments arg arg arg arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "get_tpu_system_metadata", + "source_code": "def get_tpu_system_metadata(self):\n cluster_spec = self.cluster_spec()\n cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None\n tpu_system_metadata = tpu_system_metadata_lib._query_tpu_system_metadata(self.master(), cluster_def=cluster_def, query_topology=False)\n return tpu_system_metadata", + "docstring": "Returns the metadata of the TPU system. Users can call this method to get some facts of the TPU system, like total number of cores, number of TPU workers and the devices. E.g. 
Returns: A object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py", + "ast_data": "FunctionDef name:get_tpu_system_metadata arg:self arguments arg Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "num_slices_in_dimension", + "source_code": "def num_slices_in_dimension(self, axis):\n if axis < 0:\n return constant_op.constant(1, dtype=self.dim_size_dtype)\n elif self.is_ragged(axis):\n return math_ops.reduce_sum(self._partitioned_dim_sizes[axis])\n else:\n return self.dimension_size(axis) * self.num_slices_in_dimension(axis - 1)", + "docstring": "Returns the total number of slices across the indicated dimension.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py", + "ast_data": "FunctionDef name:num_slices_in_dimension arg:self arg:axis arguments arg arg If Compare Return return:yes Call If Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "isscalarlike", + "source_code": "def isscalarlike(x) -> bool:\n return np.isscalar(x) or (isdense(x) and x.ndim == 0)", + "docstring": "Is x either a scalar, an array scalar, or a 0-dim array?", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_sputils.py", + "ast_data": "FunctionDef name:isscalarlike arg:x arguments arg Return return:yes BoolOp Call BoolOp Call Compare" + }, + { + "library": "kornia", + "name": "transform_keypoints", + "source_code": "def transform_keypoints(self, input: Union[Tensor, Keypoints], params: List[ParamItem], extra_args: Optional[Dict[str, Any]]=None) -> Union[Tensor, Keypoints]:\n if isinstance(input, Tensor):\n batchsize, frame_num = (input.size(0), input.size(1))\n input = Keypoints(input.view(-1, input.size(2), input.size(3)))\n input = super().transform_keypoints(input, params, extra_args=extra_args)\n input = input.data.view(batchsize, frame_num, -1, 2)\n else:\n input = super().transform_keypoints(input, params, extra_args=extra_args)\n return input", + "docstring": "Transform bounding boxes. Args: input: tensor with shape :math:. If input is a type, the internal shape is :math:. params: params for the sequence. extra_args: Optional dictionary of extra arguments with specific options for different input types.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\video.py", + "ast_data": "FunctionDef name:transform_keypoints arg:self arg:input arg:params arg:extra_args arguments arg arg arg arg If Call Assign Call Call Assign Call Call Call Call Assign Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_compute_best_split_and_push", + "source_code": "def _compute_best_split_and_push(self, node):\n node.split_info = self.splitter.find_node_split(n_samples=node.n_samples, histograms=node.histograms, sum_gradients=node.sum_gradients, sum_hessians=node.sum_hessians, value=node.value, lower_bound=node.children_lower_bound, upper_bound=node.children_upper_bound, allowed_features=node.allowed_features)\n if node.split_info.gain <= 0:\n self._finalize_leaf(node)\n else:\n heappush(self.splittable_nodes, node)", + "docstring": "Compute the best possible split (SplitInfo) of a given node. Also push it in the heap of splittable nodes if gain isn't zero. 
The gain of a node is 0 if either all the leaves are pure (best gain = 0), or if no split would satisfy the constraints, (min_hessians_to_split, min_gain_to_split, min_samples_leaf)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py", + "ast_data": "FunctionDef name:_compute_best_split_and_push arg:self arg:node arguments arg arg Assign Call If Compare Call Call" + }, + { + "library": "matplotlib", + "name": "_str_equal", + "source_code": "def _str_equal(obj, s):\n return isinstance(obj, str) and obj == s", + "docstring": "Return whether *obj* is a string equal to string *s*. This helper solely exists to handle the case where *obj* is a numpy array, because in such cases, a naive `` would yield an array, which cannot be used in a boolean context.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:_str_equal arg:obj arg:s arguments arg arg Return return:yes BoolOp Call Compare" + }, + { + "library": "tensorflow", + "name": "get_gpu_type", + "source_code": "def get_gpu_type():\n global GPU_TYPE\n key = 'gpu_type_no_sudo'\n gpu_dict = cuda_compute_capability.retrieve_from_golden()\n out, err = run_shell_cmd(cmds_all[PLATFORM][key])\n ret_val = out.split(b' ')\n gpu_id = ret_val[0]\n if err and FLAGS.debug:\n print('Error in detecting GPU type:\\n %s' % str(err))\n if not isinstance(ret_val, list):\n GPU_TYPE = 'unknown'\n return (gpu_id, GPU_TYPE)\n else:\n if '[' or ']' in ret_val[1]:\n gpu_release = ret_val[1].replace(b'[', b'') + b' '\n gpu_release += ret_val[2].replace(b']', b'').strip(b'\\n')\n else:\n gpu_release = ret_val[1].replace('\\n', ' ')\n if gpu_release not in gpu_dict:\n GPU_TYPE = 'unknown'\n else:\n GPU_TYPE = gpu_release\n return (gpu_id, GPU_TYPE)", + "docstring": "Retrieves GPU type. Returns: String that is the name of the detected NVIDIA GPU. e.g. 'Tesla K80' 'unknown' will be returned if detected GPU type is an unknown name. Unknown name refers to any GPU name that is not specified in this page:", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py", + "ast_data": "FunctionDef name:get_gpu_type arguments Assign Assign Call Assign Call Assign Call Assign If BoolOp Call Call If Call Assign Return return:yes If BoolOp Compare Assign Call Call Call Assign Call If Compare Assign Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "FitFailedWarning", + "source_code": "class FitFailedWarning(RuntimeWarning):\n pass", + "docstring": "Warning class used if there is an error while fitting the estimator. This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV and the cross-validation helper function cross_val_score to warn when there is an error while fitting the estimator. .. versionchanged:: 0.18 Moved from sklearn.cross_validation.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\exceptions.py", + "ast_data": "ClassDef name:FitFailedWarning" + }, + { + "library": "pandas", + "name": "idxmin", + "source_code": "def idxmin(self, axis: Axis=0, skipna: bool=True, *args, **kwargs) -> Hashable:\n axis = self._get_axis_number(axis)\n iloc = self.argmin(axis, skipna, *args, **kwargs)\n return self.index[iloc]", + "docstring": "Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. 
Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, or if ``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], index=[\"A\", \"B\", \"C\", \"D\"]) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A'", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:idxmin arg:self arg:axis arg:skipna arguments arg arg arg arg arg Assign Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "_validate_index_level", + "source_code": "@final\ndef _validate_index_level(self, level) -> None:\n if isinstance(level, int):\n if level < 0 and level != -1:\n raise IndexError(f'Too many levels: Index has only 1 level, {level} is not a valid level number')\n if level > 0:\n raise IndexError(f'Too many levels: Index has only 1 level, not {level + 1}')\n elif level != self.name:\n raise KeyError(f'Requested level ({level}) does not match index name ({self.name})')", + "docstring": "Validate index level. For single-level Index getting level number is a no-op, but some verification must be done like in MultiIndex.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_validate_index_level arg:self arg:level arguments arg arg If Call If BoolOp Compare Compare Raise Call If Compare Raise Call If Compare Raise Call" + }, + { + "library": "kornia", + "name": "RgbToYuv422", + "source_code": "class RgbToYuv422(Module):\n ONNX_EXPORTABLE = False\n\n def forward(self, yuvinput: Tensor) -> tuple[Tensor, Tensor]:\n return rgb_to_yuv422(yuvinput)", + "docstring": "Convert an image from RGB to YUV422. Width must be evenly disvisible by 2. The image data is assumed to be in the range of :math:. YUV formula follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Returns: YUV422 version of the image. Shape: - image: :math: - output: :math: and :math: Examples: >>> yuvinput = torch.rand(2, 3, 4, 6) >>> yuv = RgbToYuv422() >>> output = yuv(yuvinput) # # (2x1x4x6, 2x2x4x3) Reference:: [1]", + "type": "class", + "file_path": "kornia\\kornia\\color\\yuv.py", + "ast_data": "ClassDef name:RgbToYuv422 Assign FunctionDef name:forward arg:self arg:yuvinput arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "pmf", + "source_code": "def pmf(self, x, n, p):\n return np.exp(self.logpmf(x, n, p))", + "docstring": "Multinomial probability mass function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. 
%(_doc_default_callparams)s Returns ------- pmf : ndarray or scalar Probability density function evaluated at Notes ----- %(_doc_callparams_note)s", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:pmf arg:self arg:x arg:n arg:p arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_set_iters", + "source_code": "def _set_iters(self, iters):\n if isinstance(iters, tensor_lib.Tensor):\n iters = tensor_util.constant_value(iters)\n self._maybe_iters = iters", + "docstring": "Set number of pfor iterations.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:_set_iters arg:self arg:iters arguments arg arg If Call Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "group", + "source_code": "def group(self, group_id):\n self._validate_group_id(group_id)\n return self._Context(self, group_id)", + "docstring": "Enter a context where the lock is with group . Args: group_id: The group for which to acquire and release the lock. Returns: A context manager which will acquire the lock for .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\util\\lock_util.py", + "ast_data": "FunctionDef name:group arg:self arg:group_id arguments arg arg Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "ValidationError", + "source_code": "class ValidationError(Exception):\n pass", + "docstring": "Raised for validation errors.", + "type": "class", + "file_path": "sphinx\\sphinx\\cmd\\quickstart.py", + "ast_data": "ClassDef name:ValidationError" + }, + { + "library": "numpy", + "name": "_ufunc_doc_signature_formatter", + "source_code": "def _ufunc_doc_signature_formatter(ufunc):\n if ufunc.nin == 1:\n in_args = 'x'\n else:\n in_args = ', '.join((f'x{i + 1}' for i in range(ufunc.nin)))\n if ufunc.nout == 0:\n out_args = ', /, out=()'\n elif ufunc.nout == 1:\n out_args = ', /, out=None'\n else:\n out_args = '[, {positional}], / [, out={default}]'.format(positional=', '.join((f'out{i + 1}' for i in range(ufunc.nout))), default=repr((None,) * ufunc.nout))\n kwargs = \", casting='same_kind', order='K', dtype=None, subok=True\"\n if ufunc.signature is None:\n kwargs = f', where=True{kwargs}[, signature]'\n else:\n kwargs += '[, signature, axes, axis]'\n return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})'", + "docstring": "Builds a signature string which resembles PEP 457 This is used to construct the first line of the docstring", + "type": "function", + "file_path": "numpy\\numpy\\_core\\_internal.py", + "ast_data": "FunctionDef name:_ufunc_doc_signature_formatter arg:ufunc arguments arg If Compare Assign Assign Call Call If Compare Assign If Compare Assign Assign Call Call Call Call Assign If Compare Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_compute_n_features_outs", + "source_code": "def _compute_n_features_outs(self):\n output = [len(cats) for cats in self.categories_]\n if self._drop_idx_after_grouping is not None:\n for i, drop_idx in enumerate(self._drop_idx_after_grouping):\n if drop_idx is not None:\n output[i] -= 1\n if not self._infrequent_enabled:\n return output\n for i, infreq_idx in enumerate(self._infrequent_indices):\n if infreq_idx is None:\n continue\n output[i] -= infreq_idx.size - 1\n return output", + "docstring": "Compute the n_features_out for each input feature.", + "type": "method", + "file_path": 
"scikit-learn\\sklearn\\preprocessing\\_encoders.py", + "ast_data": "FunctionDef name:_compute_n_features_outs arg:self arguments arg Assign Call If Compare For Call If Compare If Return return:yes For Call If Compare Return return:yes" + }, + { + "library": "numpy", + "name": "NDArrayAsType", + "source_code": "class NDArrayAsType(Benchmark):\n params = [list(itertools.combinations(TYPES1, 2))]\n param_names = ['typeconv']\n timeout = 10\n\n def setup(self, typeconv):\n if typeconv[0] == typeconv[1]:\n raise NotImplementedError('Skipping test for converting to the same dtype')\n self.xarg = get_squares_().get(typeconv[0])\n\n def time_astype(self, typeconv):\n self.xarg.astype(typeconv[1])", + "docstring": "Benchmark for type conversion", + "type": "class", + "file_path": "numpy\\benchmarks\\benchmarks\\bench_ufunc.py", + "ast_data": "ClassDef name:NDArrayAsType Assign Call Call Assign Assign FunctionDef name:setup arg:self arg:typeconv arguments arg arg If Compare Raise Call Assign Call Call FunctionDef name:time_astype arg:self arg:typeconv arguments arg arg Call" + }, + { + "library": "django", + "name": "get_object", + "source_code": "def get_object(self, queryset=None):\n year = self.get_year()\n month = self.get_month()\n day = self.get_day()\n date = _date_from_string(year, self.get_year_format(), month, self.get_month_format(), day, self.get_day_format())\n qs = self.get_queryset() if queryset is None else queryset\n if not self.get_allow_future() and date > datetime.date.today():\n raise Http404(_('Future %(verbose_name_plural)s not available because %(class_name)s.allow_future is False.') % {'verbose_name_plural': qs.model._meta.verbose_name_plural, 'class_name': self.__class__.__name__})\n lookup_kwargs = self._make_single_date_lookup(date)\n qs = qs.filter(**lookup_kwargs)\n return super().get_object(queryset=qs)", + "docstring": "Get the object this request displays.", + "type": "method", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "FunctionDef name:get_object arg:self arg:queryset arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Call Call Assign Compare Call If BoolOp Call Compare Call Raise Call Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "from_fullargspec_and_signature", + "source_code": "@classmethod\ndef from_fullargspec_and_signature(cls, fullargspec, input_signature, is_pure=False, name=None, jit_compile=None):\n function_type, default_values = to_function_type(fullargspec)\n if input_signature:\n input_signature = tuple(input_signature)\n _validate_signature(input_signature)\n function_type = function_type_lib.add_type_constraints(function_type, input_signature, default_values)\n return FunctionSpec(function_type, default_values, is_pure, name, jit_compile)", + "docstring": "Construct FunctionSpec from legacy FullArgSpec format.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py", + "ast_data": "FunctionDef name:from_fullargspec_and_signature arg:cls arg:fullargspec arg:input_signature arg:is_pure arg:name arg:jit_compile arguments arg arg arg arg arg arg Assign Call If Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "validate_version", + "source_code": "def validate_version(self, where=None) -> None:\n if where is not None:\n if self.is_old_version:\n ws = incompatibility_doc % '.'.join([str(x) for x in self.version])\n warnings.warn(ws, 
IncompatibilityWarning, stacklevel=find_stack_level())", + "docstring": "are we trying to operate on an old version?", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:validate_version arg:self arg:where arguments arg arg If Compare If Assign Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "_should_be_skipped_or_marked", + "source_code": "def _should_be_skipped_or_marked(estimator, check, expected_failed_checks: dict[str, str] | None=None) -> tuple[bool, str]:\n expected_failed_checks = expected_failed_checks or {}\n check_name = _check_name(check)\n if check_name in expected_failed_checks:\n return (True, expected_failed_checks[check_name])\n return (False, 'Check is not expected to fail')", + "docstring": "Check whether a check should be skipped or marked as xfail. Parameters ---------- estimator : estimator object Estimator instance for which to generate checks. check : partial or callable Check to be marked. expected_failed_checks : dict[str, str], default=None Dictionary of the form {check_name: reason} for checks that are expected to fail. Returns ------- should_be_marked : bool Whether the check should be marked as xfail or skipped. reason : str Reason for skipping the check.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py", + "ast_data": "FunctionDef name:_should_be_skipped_or_marked arg:estimator arg:check arg:expected_failed_checks arguments arg arg arg Assign BoolOp Assign Call If Compare Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, colormaps, combination_mode, name='multivariate colormap'):\n self.name = name\n if not np.iterable(colormaps) or len(colormaps) == 1 or isinstance(colormaps, str):\n raise ValueError('A MultivarColormap must have more than one colormap.')\n colormaps = list(colormaps)\n for i, cmap in enumerate(colormaps):\n if isinstance(cmap, str):\n colormaps[i] = mpl.colormaps[cmap]\n elif not isinstance(cmap, Colormap):\n raise ValueError('colormaps must be a list of objects that subclass Colormap or a name found in the colormap registry.')\n self._colormaps = colormaps\n _api.check_in_list(['sRGB_add', 'sRGB_sub'], combination_mode=combination_mode)\n self._combination_mode = combination_mode\n self.n_variates = len(colormaps)\n self._rgba_bad = (0.0, 0.0, 0.0, 0.0)", + "docstring": "Parameters ---------- colormaps: list or tuple of objects The individual colormaps that are combined combination_mode: str, 'sRGB_add' or 'sRGB_sub' Describe how colormaps are combined in sRGB space - If 'sRGB_add' -> Mixing produces brighter colors - If 'sRGB_sub' -> Mixing produces darker colors name : str, optional The name of the colormap family.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:colormaps arg:combination_mode arg:name arguments arg arg arg arg Assign If BoolOp Call Compare Call Call Raise Call Assign Call For Call If Call Assign If Call Raise Call Assign Call Assign Assign Call Assign" + }, + { + "library": "pandas", + "name": "_set_name", + "source_code": "def _set_name(self, name, inplace: bool=False, deep: bool | None=None) -> Series:\n inplace = validate_bool_kwarg(inplace, 'inplace')\n ser = self if inplace else self.copy(deep=False)\n ser.name = name\n return ser", + "docstring": "Set the Series name. 
Parameters ---------- name : str inplace : bool Whether to modify directly or return a copy.", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:_set_name arg:self arg:name arg:inplace arg:deep arguments arg arg arg arg Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "kornia", + "name": "__neg__", + "source_code": "def __neg__(self) -> 'Quaternion':\n return Quaternion(-self.data)", + "docstring": "Inverts the sign of the quaternion data. Example: >>> q = Quaternion.identity() >>> -q.data tensor([-1., -0., -0., -0.], grad_fn=)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\quaternion.py", + "ast_data": "FunctionDef name:__neg__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "find_kernel_bounds", + "source_code": "def find_kernel_bounds(string):\n kernel_end = 0\n kernel_positions = []\n while string.find('<<<', kernel_end) != -1:\n kernel_start = string.find('<<<', kernel_end)\n kernel_end = string.find('>>>', kernel_start) + 3\n if kernel_end <= 0:\n raise InputError('no kernel end found')\n kernel_positions.append({'start': kernel_start, 'end': kernel_end, 'group': string[kernel_start:kernel_end]})\n return kernel_positions", + "docstring": "Finds the starting and ending points for all kernel launches in the string.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py", + "ast_data": "FunctionDef name:find_kernel_bounds arg:string arguments arg Assign Assign While Compare Call Assign Call Assign Call If Compare Raise Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_write_reason_section", + "source_code": "def _write_reason_section(self):\n self._write_report('%s %s\\n' % (_MARKER_SECTION_BEGIN, _SECTION_NAME_REASON))\n for key in sorted(self.instrument_records):\n self._write_report('\"%s\" %s\\n' % (key, self.instrument_records[key]))\n self._write_report('%s %s\\n' % (_MARKER_SECTION_END, _SECTION_NAME_REASON))", + "docstring": "Writes the reason section of the report.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py", + "ast_data": "FunctionDef name:_write_reason_section arg:self arguments arg Call For Call Call Call" + }, + { + "library": "tensorflow", + "name": "to_yaml", + "source_code": "def to_yaml(self, **kwargs):\n raise RuntimeError('Method `model.to_yaml()` has been removed due to security risk of arbitrary code execution. Please use `model.to_json()` instead.')", + "docstring": "Returns a yaml string containing the network configuration. Note: Since TF 2.6, this method is no longer supported and will raise a RuntimeError. To load a network from a yaml save file, use . should be a dictionary mapping the names of custom losses / layers / etc to the corresponding functions / classes. Args: **kwargs: Additional keyword arguments to be passed to . Returns: A YAML string. 
Raises: RuntimeError: announces that the method poses a security risk", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:to_yaml arg:self arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "convert", + "source_code": "def convert(module, mapping=None, inplace=False, remove_qconfig=True, is_reference=False, convert_custom_config_dict=None, use_precomputed_fake_quant=False):\n torch._C._log_api_usage_once('quantization_api.quantize.convert')\n if not inplace:\n module = copy.deepcopy(module)\n _convert(module, mapping, inplace=True, is_reference=is_reference, convert_custom_config_dict=convert_custom_config_dict, use_precomputed_fake_quant=use_precomputed_fake_quant)\n if remove_qconfig:\n _remove_qconfig(module)\n return module", + "docstring": "Converts submodules in input module to a different module according to by calling method on the target module class. And remove qconfig at the end if remove_qconfig is set to True. Args: : prepared and calibrated module : a dictionary that maps from source module type to target module type, can be overwritten to allow swapping user defined Modules : carry out model transformations in-place, the original module is mutated : custom configuration dictionary for convert function : a flag to enable use of precomputed fake quant .. code-block:: python # Example of convert_custom_config_dict: convert_custom_config_dict = { # user will manually define the corresponding quantized # module class which has a from_observed class method that converts # observed custom module to quantized custom module \"observed_to_quantized_custom_module_class\": { ObservedCustomModule: QuantizedCustomModule } }", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py", + "ast_data": "FunctionDef name:convert arg:module arg:mapping arg:inplace arg:remove_qconfig arg:is_reference arg:convert_custom_config_dict arg:use_precomputed_fake_quant arguments arg arg arg arg arg arg arg Call If Assign Call Call If Call Return return:yes" + }, + { + "library": "scipy", + "name": "savgol_filter", + "source_code": "def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0):\n if mode not in ['mirror', 'constant', 'nearest', 'interp', 'wrap']:\n raise ValueError(\"mode must be 'mirror', 'constant', 'nearest' 'wrap' or 'interp'.\")\n x = np.asarray(x)\n if x.dtype != np.float64 and x.dtype != np.float32:\n x = x.astype(np.float64)\n coeffs = savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta)\n if mode == 'interp':\n if window_length > x.shape[axis]:\n raise ValueError(\"If mode is 'interp', window_length must be less than or equal to the size of x.\")\n y = convolve1d(x, coeffs, axis=axis, mode='constant')\n _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y)\n else:\n y = convolve1d(x, coeffs, axis=axis, mode=mode, cval=cval)\n return y", + "docstring": "Apply a Savitzky-Golay filter to an array. This is a 1-D filter. If has dimension greater than 1, determines the axis along which the filter is applied. Parameters ---------- x : array_like The data to be filtered. If is not a single or double precision floating point array, it will be converted to type `modewindow_lengthxpolyorderwindow_lengthxmodecvalpolyorderwindow_lengthwindow_length // 2modexmodecvalwindow_lengthmodecvalmode='nearest'`: >>> savgol_filter(x, 5, 2, mode='nearest') array([1.74, 3.03, 3.54, 2.86, 0.66, 0.17, 1. 
, 4.6 , 7.97])", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_savitzky_golay.py", + "ast_data": "FunctionDef name:savgol_filter arg:x arg:window_length arg:polyorder arg:deriv arg:delta arg:axis arg:mode arg:cval arguments arg arg arg arg arg arg arg arg If Compare Raise Call Assign Call If BoolOp Compare Compare Assign Call Assign Call If Compare If Compare Raise Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_add_train_op", + "source_code": "def _add_train_op(self, train_op):\n if train_op is not None:\n if not isinstance(train_op, tensor.Tensor) and (not isinstance(train_op, ops.Operation)):\n raise TypeError(f'`train_op` {train_op} needs to be a Tensor or Op.')\n ops.add_to_collection(constants.TRAIN_OP_KEY, train_op)", + "docstring": "Add train op to the SavedModel. Note that this functionality is in development, and liable to be moved elsewhere. Args: train_op: Op or group of ops that are used for training. These are stored as a collection with key TRAIN_OP_KEY, but not executed. Raises: TypeError if Train op is not of type .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py", + "ast_data": "FunctionDef name:_add_train_op arg:self arg:train_op arguments arg arg If Compare If BoolOp Call Call Raise Call Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, id, len, file, extra=None, png=None):\n self.id = id\n self.len = len\n self.pdfFile = file\n self.file = file.fh\n self.compressobj = None\n if extra is None:\n self.extra = dict()\n else:\n self.extra = extra.copy()\n if png is not None:\n self.extra.update({'Filter': Name('FlateDecode'), 'DecodeParms': png})\n self.pdfFile.recordXref(self.id)\n if mpl.rcParams['pdf.compression'] and (not png):\n self.compressobj = zlib.compressobj(mpl.rcParams['pdf.compression'])\n if self.len is None:\n self.file = BytesIO()\n else:\n self._writeHeader()\n self.pos = self.file.tell()", + "docstring": "Parameters ---------- id : int Object id of the stream. len : Reference or None An unused Reference object for the length of the stream; None means to use a memory buffer so the length can be inlined. file : PdfFile The underlying object to write the stream to. extra : dict from Name to anything, or None Extra key-value pairs to include in the stream header. 
png : dict or None If the data is already png encoded, the decode parameters.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:id arg:len arg:file arg:extra arg:png arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign If Compare Assign Call Assign Call If Compare Call Call Call If BoolOp Assign Call If Compare Assign Call Call Assign Call" + }, + { + "library": "pytorch", + "name": "_save_to_state_dict", + "source_code": "def _save_to_state_dict(self, destination, prefix, keep_vars):\n for name, param in self._parameters.items():\n if param is not None:\n destination[prefix + name] = param if keep_vars else param.detach()\n for name, buf in self._buffers.items():\n if buf is not None and name not in self._non_persistent_buffers_set:\n destination[prefix + name] = buf if keep_vars else buf.detach()\n extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX\n if getattr(self.__class__, 'get_extra_state', Module.get_extra_state) is not Module.get_extra_state:\n destination[extra_state_key] = self.get_extra_state()", + "docstring": "Save module state to the dictionary. The dictionary will contain the state of the module, but not its descendants. This is called on every submodule in :meth:. In rare cases, subclasses can achieve class-specific behavior by overriding this method with custom logic. Args: destination (dict): a dict where state will be stored prefix (str): the prefix for parameters and buffers used in this module", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:_save_to_state_dict arg:self arg:destination arg:prefix arg:keep_vars arguments arg arg arg arg For Call If Compare Assign Call For Call If BoolOp Compare Compare Assign Call Assign If Compare Call Assign Call" + }, + { + "library": "tensorflow", + "name": "get_regularization_losses", + "source_code": "@tf_export(v1=['losses.get_regularization_losses'])\ndef get_regularization_losses(scope=None):\n return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope)", + "docstring": "Gets the list of regularization losses. Args: scope: An optional scope name for filtering the losses to return. 
Returns: A list of regularization losses as Tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\util.py", + "ast_data": "FunctionDef name:get_regularization_losses arg:scope arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "isscalar", + "source_code": "def isscalar(val):\n if isinstance(val, np_arrays.ndarray):\n val = val.data\n if isinstance(val, core.Tensor):\n ndims = val.shape.ndims\n if ndims is not None:\n return ndims == 0\n else:\n return math_ops.equal(array_ops.rank(val), 0)\n else:\n return np.isscalar(val)", + "docstring": "Returns whether is a scalar value or scalar Tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py", + "ast_data": "FunctionDef name:isscalar arg:val arguments arg If Call Assign If Call Assign If Compare Return return:yes Compare Return return:yes Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "tostring", + "source_code": "def tostring(self, encoding) -> str:\n if self.kind == 'string':\n if encoding is not None:\n return str(self.converted)\n return f'\"{self.converted}\"'\n elif self.kind == 'float':\n return repr(self.converted)\n return str(self.converted)", + "docstring": "quote the string if not encoded else encode and return", + "type": "method", + "file_path": "pandas\\pandas\\core\\computation\\pytables.py", + "ast_data": "FunctionDef name:tostring arg:self arg:encoding arguments arg arg If Compare If Compare Return return:yes Call Return return:yes If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "assign_on_each_device", + "source_code": "def assign_on_each_device(var, assign_func, value, read_value):\n if var._packed_variable is not None:\n update = control_flow_ops.group(tuple((assign_func(d, var._packed_variable, value) for d in var._devices)))\n else:\n update = control_flow_ops.group(tuple((assign_func(v.device, v, value) for v in var._values)))\n if not read_value:\n return update\n with ops.control_dependencies([update] if update else []):\n return var.read_value()", + "docstring": "Update the variable on each replica with the given assign_func and value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py", + "ast_data": "FunctionDef name:assign_on_each_device arg:var arg:assign_func arg:value arg:read_value arguments arg arg arg arg If Compare Assign Call Call Call Assign Call Call Call If Return return:yes With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "OpMetadata", + "source_code": "class OpMetadata:\n __slots__ = ('op_type', 'op_name', 'source_file', 'source_line')\n\n def __init__(self, op_type='', op_name='', source_file='', source_line=0):\n self.op_type = op_type\n self.op_name = op_name\n self.source_file = source_file\n self.source_line = source_line", + "docstring": "Python representation of a xla.OpMetadata protobuf.", + "type": "class", + "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py", + "ast_data": "ClassDef name:OpMetadata Assign FunctionDef name:__init__ arg:self arg:op_type arg:op_name arg:source_file arg:source_line arguments arg arg arg arg arg Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "_get_dense_tensor_internal", + "source_code": "def _get_dense_tensor_internal(self, sparse_tensors, state_manager):\n embedding_weights = state_manager.get_variable(self, 
name='embedding_weights')\n return self._get_dense_tensor_internal_helper(sparse_tensors, embedding_weights)", + "docstring": "Private method that follows the signature of get_dense_tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:_get_dense_tensor_internal arg:self arg:sparse_tensors arg:state_manager arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "hardtanh", + "source_code": "def hardtanh(input: Tensor, min_val: float=-1.0, max_val: float=1.0, inplace: bool=False) -> Tensor:\n if not input.is_quantized:\n raise ValueError(\"Input to 'quantized.hardtanh' must be quantized!\")\n if inplace:\n return torch._C._nn.hardtanh_(input, min_val, max_val)\n return torch._C._nn.hardtanh(input, min_val, max_val)", + "docstring": "This is the quantized version of :func:.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py", + "ast_data": "FunctionDef name:hardtanh arg:input arg:min_val arg:max_val arg:inplace arguments arg arg arg arg If Raise Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_split_by_task", + "source_code": "def _split_by_task(devices, values):\n num_devices = len(devices)\n if num_devices != len(values):\n raise ValueError('len(devices) must equal len(values)')\n per_task_devices = collections.OrderedDict()\n per_task_values = collections.OrderedDict()\n for d in range(num_devices):\n d_spec = device_lib.DeviceSpec.from_string(devices[d])\n if not hasattr(d_spec, 'task') or d_spec.task is None:\n assert False, 'failed to parse device %s' % devices[d]\n index = (d_spec.job or 'localhost', d_spec.replica or 0, d_spec.task)\n if index not in per_task_devices:\n per_task_devices[index] = []\n per_task_values[index] = []\n per_task_devices[index].append(devices[d])\n per_task_values[index].append(values[d])\n return (list(per_task_devices.values()), list(per_task_values.values()))", + "docstring": "Partition devices and values by common task. Args: devices: list of device name strings values: list of of same length as devices. Returns: (per_task_devices, per_task_values) where both values are lists of lists with isomorphic structure: the outer list is indexed by task, and the inner list has length of the number of values belonging to that task. per_task_devices contains the specific devices to which the values are local, and per_task_values contains the corresponding values. 
Raises: ValueError: devices must be same length as values.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", + "ast_data": "FunctionDef name:_split_by_task arg:devices arg:values arguments arg arg Assign Call If Compare Call Raise Call Assign Call Assign Call For Call Assign Call If BoolOp Call Compare Assign BoolOp BoolOp If Compare Assign Assign Call Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_kl_bernoulli_bernoulli", + "source_code": "@kullback_leibler.RegisterKL(Bernoulli, Bernoulli)\ndef _kl_bernoulli_bernoulli(a, b, name=None):\n with ops.name_scope(name, 'kl_bernoulli_bernoulli', values=[a.logits, b.logits]):\n delta_probs0 = nn.softplus(-b.logits) - nn.softplus(-a.logits)\n delta_probs1 = nn.softplus(b.logits) - nn.softplus(a.logits)\n return math_ops.sigmoid(a.logits) * delta_probs0 + math_ops.sigmoid(-a.logits) * delta_probs1", + "docstring": "Calculate the batched KL divergence KL(a || b) with a and b Bernoulli. Args: a: instance of a Bernoulli distribution object. b: instance of a Bernoulli distribution object. name: (optional) Name to use for created operations. default is \"kl_bernoulli_bernoulli\". Returns: Batchwise KL(a || b)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bernoulli.py", + "ast_data": "FunctionDef name:_kl_bernoulli_bernoulli arg:a arg:b arg:name arguments arg arg arg With Call Assign Call Call Assign Call Call Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "get_useLocale", + "source_code": "def get_useLocale(self):\n return self._useLocale", + "docstring": "Return whether locale settings are used for formatting. See Also -------- ScalarFormatter.set_useLocale", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:get_useLocale arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "bar", + "source_code": "@_preprocess_data()\ndef bar(self, left, height, zs=0, zdir='z', *args, axlim_clip=False, **kwargs):\n had_data = self.has_data()\n patches = super().bar(left, height, *args, **kwargs)\n zs = np.broadcast_to(zs, len(left), subok=True)\n verts = []\n verts_zs = []\n for p, z in zip(patches, zs):\n vs = art3d._get_patch_verts(p)\n verts += vs.tolist()\n verts_zs += [z] * len(vs)\n art3d.patch_2d_to_3d(p, z, zdir, axlim_clip)\n if 'alpha' in kwargs:\n p.set_alpha(kwargs['alpha'])\n if len(verts) > 0:\n xs, ys = zip(*verts)\n else:\n xs, ys = ([], [])\n xs, ys, verts_zs = art3d.juggle_axes(xs, ys, verts_zs, zdir)\n self.auto_scale_xyz(xs, ys, verts_zs, had_data)\n return patches", + "docstring": "Add 2D bar(s). Parameters ---------- left : 1D array-like The x coordinates of the left sides of the bars. height : 1D array-like The height of the bars. zs : float or 1D array-like, default: 0 Z coordinate of bars; if a single value is specified, it will be used for all bars. zdir : {'x', 'y', 'z'}, default: 'z' When plotting 2D data, the direction to use as z ('x', 'y' or 'z'). axlim_clip : bool, default: False Whether to hide bars with points outside the axes view limits. .. versionadded:: 3.10 data : indexable object, optional DATA_PARAMETER_PLACEHOLDER **kwargs Other keyword arguments are forwarded to . 
Returns ------- mpl_toolkits.mplot3d.art3d.Patch3DCollection", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:bar arg:self arg:left arg:height arg:zs arg:zdir arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Call Assign Call Call Assign Assign For Call Assign Call Call Call Call If Compare Call If Compare Call Assign Call Assign Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_create_all_weights", + "source_code": "def _create_all_weights(self, params):\n raise NotImplementedError", + "docstring": "Creates and sets all optimizer weights. Args: params: list or tuple of objects that will be minimized using this optimizer. Returns: Specific weight values that are used in", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v1.py", + "ast_data": "FunctionDef name:_create_all_weights arg:self arg:params arguments arg arg Raise" + }, + { + "library": "tensorflow", + "name": "segment_mean", + "source_code": "@dispatch.dispatch_for_api(math_ops.unsorted_segment_mean)\ndef segment_mean(data: ragged_tensor.RaggedOrDense, segment_ids: ragged_tensor.RaggedOrDense, num_segments, name=None):\n with ops.name_scope(name, 'RaggedSegmentMean', [data, segment_ids, num_segments]):\n total = segment_sum(data, segment_ids, num_segments)\n ones = ragged_tensor.RaggedTensor.from_nested_row_splits(array_ops.ones_like(data.flat_values), data.nested_row_splits, validate=False)\n count = segment_sum(ones, segment_ids, num_segments)\n if ragged_tensor.is_ragged(total):\n return total.with_flat_values(total.flat_values / count.flat_values)\n else:\n return total / count", + "docstring": "For docs, see: _RAGGED_SEGMENT_DOCSTRING.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py", + "ast_data": "FunctionDef name:segment_mean arg:data arg:segment_ids arg:num_segments arg:name arguments arg arg arg arg With Call Assign Call Assign Call Call Assign Call If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "get_option_single", + "source_code": "def get_option_single(self, *options):\n found = [self.cp.has_option(self.section, opt) for opt in options]\n if sum(found) == 1:\n return options[found.index(True)]\n elif sum(found) == 0:\n return options[0]\n if AliasedOptionError.__doc__ is None:\n raise AliasedOptionError()\n raise AliasedOptionError(AliasedOptionError.__doc__.format(section=self.section, options='[{}]'.format(', '.join(options))))", + "docstring": "Ensure that only one of are found in the section Parameters ---------- *options : list of str a list of options to be found in the section (``) Returns ------- str : the option that is uniquely found in the section Raises ------ AliasedOptionError : in case more than one of the options are found", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\system_info.py", + "ast_data": "FunctionDef name:get_option_single arg:self arguments arg arg Assign Call If Compare Call Return return:yes Call If Compare Call Return return:yes If Compare Raise Call Raise Call Call Call Call" + }, + { + "library": "pytorch", + "name": "create_placeholder", + "source_code": "def create_placeholder(name: str, dtype: torch.dtype, device: torch.device) -> TensorBox:\n input_buffer = InputBuffer(name=name, layout=FixedLayout(device, dtype, [], []))\n return TensorBox.create(input_buffer)", + "docstring": "Creates a placeholder 
input buffers for producing subgraph_output", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\b2b_gemm.py", + "ast_data": "FunctionDef name:create_placeholder arg:name arg:dtype arg:device arguments arg arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_rewrite_input_as_indexed_slices", + "source_code": "def _rewrite_input_as_indexed_slices(body_grad_graph, grad_output_slices, forward_input, loop_vars):\n init_slices = _create_grad_indexed_slices_init(grad_output_slices, forward_input)\n with body_grad_graph.as_default():\n input_slices = indexed_slices.IndexedSlices(values=body_grad_graph.capture(init_slices.values, allowlisted=True), indices=body_grad_graph.capture(init_slices.indices, allowlisted=True), dense_shape=body_grad_graph.capture(init_slices.dense_shape, allowlisted=True))\n for t in _flatten(init_slices):\n captured_t = body_grad_graph.captures.pop(t)\n body_grad_graph.inputs.remove(captured_t)\n new_output_slices = _rewrite_grad_indexed_slices_output(grad_output_slices, input_slices)\n return _update_indexed_slices_param(body_grad_graph, loop_vars, init_slices, input_slices, new_output_slices, grad_output_slices)", + "docstring": "Rewrites grad_output_slices's corresponding input to be an IndexedSlices. This rewrite requires that forward_input was captured in the forward loop, i.e. is not a user-specified loop variable. This is important because the rewrite assumes that forward_input is passed through to its corresponding output unchanged. This assumption is used in _rewrite_input_as_indexed_slices, which depends on the exact gradient structure produced by the input's fanout. This can yield a more efficient computation than using _rewrite_output_as_tensor, since it preserves the IndexedSlices structure instead of converting the IndexedSlices to a dense Tensor. Args: body_grad_graph: _WhileBodyGradFuncGraph. grad_output_slices: IndexedSlices output of body_grad_graph. forward_input: the corresponding Tensor input to the forward loop. loop_vars: list of Tensors. The inputs to body_grad_graph. 
Returns: The new loop_vars to pass to body_grad_graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2_indexed_slices_rewriter.py", + "ast_data": "FunctionDef name:_rewrite_input_as_indexed_slices arg:body_grad_graph arg:grad_output_slices arg:forward_input arg:loop_vars arguments arg arg arg arg Assign Call With Call Assign Call Call Call Call For Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_sample_kmc2_chain", + "source_code": "def _sample_kmc2_chain():\n start = i * self._kmc2_chain_length\n end = start + self._kmc2_chain_length\n subset = first_shard[start:end]\n _, distances = gen_clustering_ops.nearest_neighbors(subset, self._cluster_centers, 1)\n new_center_index = gen_clustering_ops.kmc2_chain_initialization(array_ops.squeeze(distances), self._seed)\n newly_sampled_center = array_ops.reshape(subset[new_center_index], [1, -1])\n if self._distance_metric == COSINE_DISTANCE:\n newly_sampled_center = nn_impl.l2_normalize(newly_sampled_center, dim=1)\n return array_ops.concat([self._cluster_centers, newly_sampled_center], 0)", + "docstring": "Returns previous centers as well as a new center sampled using k-MC2.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py", + "ast_data": "FunctionDef name:_sample_kmc2_chain arguments Assign Assign Assign Assign Call Assign Call Call Assign Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "assert_rank_at_least_v2", + "source_code": "@tf_export('debugging.assert_rank_at_least', v1=[])\n@dispatch.add_dispatch_support\ndef assert_rank_at_least_v2(x, rank, message=None, name=None):\n return assert_rank_at_least(x=x, rank=rank, message=message, name=name)", + "docstring": "Assert that has rank of at least . This Op checks that the rank of is greater or equal to . If has a rank lower than , , as well as the shape of are printed, and is raised. Args: x: . rank: Scalar integer . message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to \"assert_rank_at_least\". Returns: Op raising unless has specified rank or higher. If static checks determine has correct rank, a is returned. This can be used with inside of s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: does not have rank at least , but the rank cannot be statically determined. ValueError: If static checks determine has mismatched rank.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py", + "ast_data": "FunctionDef name:assert_rank_at_least_v2 arg:x arg:rank arg:message arg:name arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "set_optimizer_jit", + "source_code": "@tf_export('config.optimizer.set_jit')\n@deprecation.deprecated_arg_values(None, '`True` setting is deprecated, use `autoclustering` instead.', warn_once=True, jit_config=True)\ndef set_optimizer_jit(enabled: Union[bool, str]):\n autoclustering_enabled = enabled in (True, 'autoclustering')\n context.context().optimizer_jit = autoclustering_enabled", + "docstring": "Configure JIT compilation. Note: compilation is only applied to code that is compiled into a graph (in TF2 that's only a code inside ). Args: enabled: JIT compilation configuration. 
Possible values: - ( is a deprecated alias): perform [autoclustering]( (automatically identify and compile clusters of nodes) on all graphs using [XLA]( - : do not automatically compile any graphs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py", + "ast_data": "FunctionDef name:set_optimizer_jit arg:enabled arguments arg Assign Compare Assign Call Call Call" + }, + { + "library": "sphinx", + "name": "correct_copyright_year", + "source_code": "def correct_copyright_year(_app: Sphinx, config: Config) -> None:\n if (source_date_epoch := int(getenv('SOURCE_DATE_EPOCH', '0'))):\n source_date_epoch_year = time.gmtime(source_date_epoch).tm_year\n else:\n return\n current_year = time.localtime().tm_year\n if current_year <= source_date_epoch_year:\n return\n current_yr = str(current_year)\n replace_yr = str(source_date_epoch_year)\n for k in ('copyright', 'epub_copyright'):\n if k in config:\n value: str | Sequence[str] = config[k]\n if isinstance(value, str):\n config[k] = _substitute_copyright_year(value, current_yr, replace_yr)\n else:\n items = (_substitute_copyright_year(x, current_yr, replace_yr) for x in value)\n config[k] = type(value)(items)", + "docstring": "Correct values of copyright year that are not coherent with the SOURCE_DATE_EPOCH environment variable (if set) See", + "type": "function", + "file_path": "sphinx\\sphinx\\config.py", + "ast_data": "FunctionDef name:correct_copyright_year arg:_app arg:config arguments arg arg If Call Call Assign Call Return return:no Assign Call If Compare Return return:no Assign Call Assign Call For If Compare If Call Assign Call Assign Call Assign Call Call" + }, + { + "library": "matplotlib", + "name": "recursive_subclasses", + "source_code": "def recursive_subclasses(cls):\n yield cls\n for subcls in cls.__subclasses__():\n yield from recursive_subclasses(subcls)", + "docstring": "Yield *cls* and direct and indirect subclasses of *cls*.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\_api\\__init__.py", + "ast_data": "FunctionDef name:recursive_subclasses arg:cls arguments arg For Call Call" + }, + { + "library": "pytorch", + "name": "impl_backward", + "source_code": "def impl_backward(self, output_differentiability=None, _stacklevel=2):\n if output_differentiability is not None:\n\n def yell():\n raise RuntimeError(f'impl_backward(output_differentiability): expected output_differentiability to be a list of bools with length equal to the number of outputs of this CustomOp got: {output_differentiability}')\n if not isinstance(output_differentiability, list):\n yell()\n for diff in output_differentiability:\n if not isinstance(diff, bool):\n yell()\n if len(self._schema.returns) != len(output_differentiability):\n yell()\n\n def inner(f):\n self._check_can_register_backward()\n self._check_doesnt_have_library_autograd_impl()\n if not self._registered_autograd_kernel_indirection:\n self._register_autograd_kernel_indirection()\n self._register_impl('backward', f, stacklevel=_stacklevel)\n self._output_differentiability = output_differentiability\n if self._has_impl('save_for_backward'):\n self._register_autograd_kernel()\n return inner", + "docstring": "This API is deprecated, please use torch.library.custom_op instead", + "type": "method", + "file_path": "pytorch\\torch\\_custom_op\\impl.py", + "ast_data": "FunctionDef name:impl_backward arg:self arg:output_differentiability arg:_stacklevel arguments arg arg arg If Compare FunctionDef name:yell arguments Raise Call If Call Call For 
If Call Call If Compare Call Call Call FunctionDef name:inner arg:f arguments arg Call Call If Call Call Assign If Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_updates_for", + "source_code": "def get_updates_for(self, inputs):\n if inputs is None:\n return [u for u in self.updates if u._unconditional_update]\n updates = [u for u in self.updates if not u._unconditional_update]\n inputs = nest.flatten(inputs)\n reachable = tf_utils.get_reachable_from_inputs(inputs, updates)\n return [u for u in updates if u in reachable]", + "docstring": "Retrieves updates relevant to a specific set of inputs. Args: inputs: Input tensor or list/tuple of input tensors. Returns: List of update ops of the layer that depend on .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:get_updates_for arg:self arg:inputs arguments arg arg If Compare Return return:yes Assign Assign Call Assign Call Return return:yes Compare" + }, + { + "library": "numpy", + "name": "njoin", + "source_code": "def njoin(*path):\n paths = []\n for p in path:\n if is_sequence(p):\n paths.append(njoin(*p))\n else:\n assert is_string(p)\n paths.append(p)\n path = paths\n if not path:\n joined = ''\n else:\n joined = os.path.join(*path)\n if os.path.sep != '/':\n joined = joined.replace('/', os.path.sep)\n return minrelpath(joined)", + "docstring": "Join two or more pathname components + - convert a /-separated pathname to one using the OS's path separator. - resolve and from path. Either passing n arguments as in njoin('a','b'), or a sequence of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\misc_util.py", + "ast_data": "FunctionDef name:njoin arguments arg Assign For If Call Call Call Call Call Assign If Assign Assign Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.interpd\ndef __init__(self, xy, width, height, *, angle=0, **kwargs):\n super().__init__(**kwargs)\n self._center = xy\n self._width, self._height = (width, height)\n self._angle = angle\n self._path = Path.unit_circle()\n self._aspect_ratio_correction = 1.0\n self._patch_transform = transforms.IdentityTransform()", + "docstring": "Parameters ---------- xy : (float, float) xy coordinates of ellipse centre. width : float Total length (diameter) of horizontal axis. height : float Total length (diameter) of vertical axis. angle : float, default: 0 Rotation in degrees anti-clockwise. Notes ----- Valid keyword arguments are: %(Patch:kwdoc)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:xy arg:width arg:height arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Call Assign Assign Call" + }, + { + "library": "scikit-learn", + "name": "get_feature_names_out", + "source_code": "def get_feature_names_out(self, input_features=None):\n check_is_fitted(self, 'n_features_in_')\n input_features = _check_feature_names_in(self, input_features)\n names = input_features[self._valid_mask]\n return self._concatenate_indicator_feature_names_out(names, input_features)", + "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. 
If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\impute\\_knn.py", + "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call Assign Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit_transform", + "source_code": "def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X, y)", + "docstring": "Learn and apply the dimensionality reduction. Parameters ---------- X : array-like of shape (n_samples, n_features) Training samples. y : array-like of shape (n_samples,) or (n_samples, n_targets), default=None Targets. Returns ------- out : array-like or tuple of array-like The transformed data if , otherwise.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cross_decomposition\\_pls.py", + "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "_f_direction", + "source_code": "@property\ndef _f_direction(self):\n return self._f_dir_from_t(self.t_direction)", + "docstring": "The direction that is other than .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:_f_direction arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "call", + "source_code": "@doc_controls.do_not_doc_inheritable\ndef call(self, inputs, training=None, mask=None):\n return self._run_internal_graph(inputs, training=training, mask=mask)", + "docstring": "Calls the model on new inputs. In this case just reapplies all ops in the graph to the new inputs (e.g. build a new computational graph from the provided inputs). Args: inputs: A tensor or list of tensors. training: Boolean or boolean scalar tensor, indicating whether to run the in training mode or inference mode. mask: A mask or list of masks. A mask can be either a tensor or None (no mask). Returns: A tensor if there is a single output, or a list of tensors if there are more than one outputs.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py", + "ast_data": "FunctionDef name:call arg:self arg:inputs arg:training arg:mask arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "erlang_gen", + "source_code": "class erlang_gen(gamma_gen):\n\n def _argcheck(self, a):\n allint = np.all(np.floor(a) == a)\n if not allint:\n message = f'The shape parameter of the erlang distribution has been given a non-integer value {a!r}.'\n warnings.warn(message, RuntimeWarning, stacklevel=3)\n return a > 0\n\n def _shape_info(self):\n return [_ShapeInfo('a', True, (1, np.inf), (True, False))]\n\n def _fitstart(self, data):\n if isinstance(data, CensoredData):\n data = data._uncensor()\n a = int(4.0 / (1e-08 + _skew(data) ** 2))\n return super(gamma_gen, self)._fitstart(data, args=(a,))\n\n @extend_notes_in_docstring(rv_continuous, notes=' The Erlang distribution is generally defined to have integer values\\n for the shape parameter. This is not enforced by the `erlang` class.\\n When fitting the distribution, it will generally return a non-integer\\n value for the shape parameter. 
By using the keyword argument\\n `f0=`, the fit method can be constrained to fit the data to\\n a specific integer shape parameter.')\n def fit(self, data, *args, **kwds):\n return super().fit(data, *args, **kwds)", + "docstring": "An Erlang continuous random variable. %(before_notes)s See Also -------- gamma Notes ----- The Erlang distribution is a special case of the Gamma distribution, with the shape parameter an integer. Note that this restriction is not enforced by . It will, however, generate a warning the first time a non-integer value is used for the shape parameter. Refer to for examples.", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", + "ast_data": "ClassDef name:erlang_gen FunctionDef name:_argcheck arg:self arg:a arguments arg arg Assign Call Compare Call If Assign Call Return return:yes Compare FunctionDef name:_shape_info arg:self arguments arg Return return:yes Call FunctionDef name:_fitstart arg:self arg:data arguments arg arg If Call Assign Call Assign Call Call Return return:yes Call Call FunctionDef name:fit arg:self arg:data arguments arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "call_function", + "source_code": "def call_function(self, name, tensor_inputs, num_outputs):\n attrs = tuple(itertools.chain(*self.function_call_options.as_attrs().items()))\n cancellation_context = cancellation.context()\n if cancellation_context is None:\n outputs = execute.execute(name.decode('utf-8'), num_outputs=num_outputs, inputs=tensor_inputs, attrs=attrs, ctx=self)\n else:\n outputs = execute.execute_with_cancellation(name.decode('utf-8'), num_outputs=num_outputs, inputs=tensor_inputs, attrs=attrs, ctx=self, cancellation_manager=cancellation_context)\n outputs = outputs or None\n return outputs", + "docstring": "Calls the function associated with the given name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:call_function arg:self arg:name arg:tensor_inputs arg:num_outputs arguments arg arg arg arg Assign Call Call Call Call Assign Call If Compare Assign Call Call Assign Call Call Assign BoolOp Return return:yes" + }, + { + "library": "pytorch", + "name": "set_stance", + "source_code": "class set_stance(_DecoratorContextManager):\n _dynamo_forbidden = True\n\n def __init__(self, stance: str='default', *, skip_guard_eval_unsafe: bool=False, force_backend=None) -> None:\n if force_backend is not None and stance != 'default':\n raise RuntimeError('non-default stance cannot have force_backend set')\n self.stance = DynamoStance(stance, skip_guard_eval_unsafe, force_backend)\n self.prev = _set_stance(self.stance)\n\n def __call__(self, fn):\n _set_stance(self.prev)\n wrapper = super().__call__(fn)\n wrapper._dynamo_forbidden = True\n return wrapper\n\n def __enter__(self):\n _set_stance(self.stance)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n _set_stance(self.prev)\n\n def clone(self):\n return self.__class__(self.stance.stance, force_backend=self.stance.backend)", + "docstring": "Decorator, context manager, function to set the current stance of the compiler. 
Stances documented in corresponding function in torch/compiler/__init__.py", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\decorators.py", + "ast_data": "ClassDef name:set_stance Assign FunctionDef name:__init__ arg:self arg:stance arguments arg arg arg arg If BoolOp Compare Compare Raise Call Assign Call Assign Call FunctionDef name:__call__ arg:self arg:fn arguments arg arg Call Assign Call Call Assign Return return:yes FunctionDef name:__enter__ arg:self arguments arg Call FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg Call FunctionDef name:clone arg:self arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "Gray", + "source_code": "class Gray(OperationBase):\n\n def __init__(self, initial_probability: float=0.5, temperature: float=0.1) -> None:\n super().__init__(K.RandomGrayscale(same_on_batch=False, p=initial_probability), initial_magnitude=None, temperature=temperature, symmetric_megnitude=False)", + "docstring": "Apply grayscale operation. Args: initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. temperature: temperature for RelaxedBernoulli distribution used during training.", + "type": "class", + "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py", + "ast_data": "ClassDef name:Gray FunctionDef name:__init__ arg:self arg:initial_probability arg:temperature arguments arg arg arg Call Call Call" + }, + { + "library": "scipy", + "name": "traceest", + "source_code": "def traceest(A, m3, seed=None):\n rng = np.random.default_rng(seed)\n if len(A.shape) != 2 or A.shape[-1] != A.shape[-2]:\n raise ValueError('Expected A to be like a square matrix.')\n n = A.shape[-1]\n S = rng.choice([-1.0, +1.0], [n, m3])\n Q, _ = qr(A.matmat(S), overwrite_a=True, mode='economic')\n trQAQ = np.trace(Q.conj().T @ A.matmat(Q))\n G = rng.choice([-1, +1], [n, m3])\n right = G - Q @ (Q.conj().T @ G)\n trGAG = np.trace(right.conj().T @ A.matmat(right))\n return trQAQ + trGAG / m3", + "docstring": "Estimate using matrix-vector products. The result is not deterministic. Parameters ---------- A : LinearOperator Linear operator whose trace will be estimated. Has to be square. m3 : int Number of matrix-vector products divided by 3 used to estimate the trace. seed : optional Seed for . Can be provided to obtain deterministic results. Returns ------- trace : LinearOperator.dtype Estimate of the trace Notes ----- This is the Hutch++ algorithm given in [1]_. References ---------- .. [1] Meyer, Raphael A., Cameron Musco, Christopher Musco, and David P. Woodruff. \"Hutch++: Optimal Stochastic Trace Estimation.\" In Symposium on Simplicity in Algorithms (SOSA), pp. 142-155. 
Society for Industrial and Applied Mathematics, 2021", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py", + "ast_data": "FunctionDef name:traceest arg:A arg:m3 arg:seed arguments arg arg arg Assign Call If BoolOp Compare Call Compare Raise Call Assign Assign Call Assign Call Call Assign Call Call Call Assign Call Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "isinv", + "source_code": "def isinv(A, B, tol=None):\n n = np.size(A, 0)\n if DEBUGGING:\n assert np.size(A, 0) == np.size(A, 1)\n assert np.size(B, 0) == np.size(B, 1)\n assert np.size(A, 0) == np.size(B, 0)\n if present(tol):\n assert tol >= 0\n tol = tol if present(tol) else np.minimum(0.001, 100.0 * EPS * np.maximum(np.size(A, 0), np.size(A, 1)))\n tol = np.max([tol, tol * np.max(abs(A)), tol * np.max(abs(B))])\n is_inv = (abs(matprod(A, B)) - np.eye(n) <= tol).all() or (abs(matprod(B, A) - np.eye(n)) <= tol).all()\n return is_inv", + "docstring": "This procedure tests whether A = B^{-1} up to the tolerance TOL.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\linalg.py", + "ast_data": "FunctionDef name:isinv arg:A arg:B arg:tol arguments arg arg arg Assign Call If Compare Call Call Compare Call Call Compare Call Call If Call Compare Assign Call Call Call Call Call Assign Call Call Call Call Call Assign BoolOp Call Compare Call Call Call Call Compare Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "assert_consumed", + "source_code": "@abc.abstractmethod\ndef assert_consumed(self):\n pass", + "docstring": "Raises an exception unless a non-trivial restoration has completed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:assert_consumed arg:self arguments arg" + }, + { + "library": "pandas", + "name": "ValueLabelTypeMismatch", + "source_code": "class ValueLabelTypeMismatch(Warning):\n pass", + "docstring": "Warning raised by to_stata on a category column that contains non-string values. When exporting data to Stata format using the method, category columns must have string values as labels. If a category column contains non-string values (e.g., integers, floats, or other types), this warning is raised to indicate that the Stata file may not correctly represent the data. See Also -------- DataFrame.to_stata : Export DataFrame object to Stata dta format. Series.cat : Accessor for categorical properties of the Series values. 
Examples -------- >>> df = pd.DataFrame({\"categories\": pd.Series([\"a\", 2], dtype=\"category\")}) >>> df.to_stata(\"test\") # doctest: +SKIP", + "type": "class", + "file_path": "pandas\\pandas\\errors\\__init__.py", + "ast_data": "ClassDef name:ValueLabelTypeMismatch" + }, + { + "library": "pytorch", + "name": "bfloat16", + "source_code": "def bfloat16(self):\n _warn_typed_storage_removal()\n return self._to(torch.bfloat16)", + "docstring": "Casts this storage to bfloat16 type.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:bfloat16 arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_debug_get_cache_entry_list", + "source_code": "def _debug_get_cache_entry_list(code: Union[types.CodeType, Callable[..., Any]]) -> list[CacheEntry]:\n if callable(code):\n code = code.__code__\n return torch._C._dynamo.eval_frame._debug_get_cache_entry_list(code)", + "docstring": "Given a code object or a callable object, retrieve the cache entries stored in this code.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\eval_frame.py", + "ast_data": "FunctionDef name:_debug_get_cache_entry_list arg:code arguments arg If Call Assign Return return:yes Call" + }, + { + "library": "scrapy", + "name": "_fire_response_deferred", + "source_code": "def _fire_response_deferred(self) -> None:\n body = self._response['body'].getvalue()\n response_cls = responsetypes.from_args(headers=self._response['headers'], url=self._request.url, body=body)\n response = response_cls(url=self._request.url, status=int(self._response['headers'][':status']), headers=self._response['headers'], body=body, request=self._request, certificate=self._protocol.metadata['certificate'], ip_address=self._protocol.metadata['ip_address'], protocol='h2')\n self._deferred_response.callback(response)", + "docstring": "Builds response from the self._response dict and fires the response deferred callback with the generated response instance", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\http2\\stream.py", + "ast_data": "FunctionDef name:_fire_response_deferred arg:self arguments arg Assign Call Assign Call Assign Call Call Call" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X):\n check_is_fitted(self)\n X = self._check_inputs(X, in_fit=False, copy=self.copy)\n return self._transform(X, inverse=False)", + "docstring": "Feature-wise transformation of the data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the features axis. If a sparse matrix is provided, it will be converted into a sparse `ignore_implicit_zeros` is False. 
Returns ------- Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) The projected data.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "make_data", + "source_code": "@abstractmethod\ndef make_data(self, params):\n pass", + "docstring": "Return the dataset for a combination of parameters", + "type": "method", + "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\common.py", + "ast_data": "FunctionDef name:make_data arg:self arg:params arguments arg arg" + }, + { + "library": "scikit-learn", + "name": "count_nonzero", + "source_code": "def count_nonzero(X, axis=None, sample_weight=None):\n if axis == -1:\n axis = 1\n elif axis == -2:\n axis = 0\n elif X.format != 'csr':\n raise TypeError('Expected CSR sparse format, got {0}'.format(X.format))\n if axis is None:\n if sample_weight is None:\n return X.nnz\n else:\n return np.dot(np.diff(X.indptr), sample_weight)\n elif axis == 1:\n out = np.diff(X.indptr)\n if sample_weight is None:\n return out.astype('intp')\n return out * sample_weight\n elif axis == 0:\n if sample_weight is None:\n return np.bincount(X.indices, minlength=X.shape[1])\n else:\n weights = np.repeat(sample_weight, np.diff(X.indptr))\n return np.bincount(X.indices, minlength=X.shape[1], weights=weights)\n else:\n raise ValueError('Unsupported axis: {0}'.format(axis))", + "docstring": "A variant of X.getnnz() with extension to weighting on axis 0. Useful in efficiently calculating multilabel metrics. Parameters ---------- X : sparse matrix of shape (n_samples, n_labels) Input data. It should be of CSR format. axis : {0, 1}, default=None The axis on which the data is aggregated. sample_weight : array-like of shape (n_samples,), default=None Weight for each row of X. Returns ------- nnz : int, float, ndarray of shape (n_samples,) or ndarray of shape (n_features,) Number of non-zero values in the array along a given axis. 
Otherwise, the total number of non-zero values in the array is returned.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py", + "ast_data": "FunctionDef name:count_nonzero arg:X arg:axis arg:sample_weight arguments arg arg arg If Compare Assign If Compare Assign If Compare Raise Call Call If Compare If Compare Return return:yes Return return:yes Call Call If Compare Assign Call If Compare Return return:yes Call Return return:yes If Compare If Compare Return return:yes Call Assign Call Call Return return:yes Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "_reduce_non_singleton", + "source_code": "def _reduce_non_singleton(input_tensors, red_f, un_op):\n if len(input_tensors) > 1:\n return red_f(input_tensors)\n else:\n if not un_op:\n return input_tensors\n output_tensors = []\n for t in input_tensors:\n with ops.colocate_with(t):\n output_tensors.append(un_op(t))\n return output_tensors", + "docstring": "If len(input_tensors) > 1, apply red_f, else apply un_op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", + "ast_data": "FunctionDef name:_reduce_non_singleton arg:input_tensors arg:red_f arg:un_op arguments arg arg arg If Compare Call Return return:yes Call If Return return:yes Assign For With Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_serialize_to_tensors", + "source_code": "def _serialize_to_tensors(self):\n with ops.init_scope():\n value = constant_op.constant(self.serialize(), dtype=dtypes.string)\n return {PYTHON_STATE: value}", + "docstring": "Implements Trackable._serialize_to_tensors.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\python_state.py", + "ast_data": "FunctionDef name:_serialize_to_tensors arg:self arguments arg With Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "create_token", + "source_code": "def create_token(self, token_string, position, lineno, in_tag):\n if in_tag:\n token_start = token_string[0:2]\n if token_start == BLOCK_TAG_START:\n content = token_string[2:-2].strip()\n if self.verbatim:\n if content != self.verbatim:\n return Token(TokenType.TEXT, token_string, position, lineno)\n self.verbatim = False\n elif content[:9] in ('verbatim', 'verbatim '):\n self.verbatim = 'end%s' % content\n return Token(TokenType.BLOCK, content, position, lineno)\n if not self.verbatim:\n content = token_string[2:-2].strip()\n if token_start == VARIABLE_TAG_START:\n return Token(TokenType.VAR, content, position, lineno)\n assert token_start == COMMENT_TAG_START\n return Token(TokenType.COMMENT, content, position, lineno)\n return Token(TokenType.TEXT, token_string, position, lineno)", + "docstring": "Convert the given token string into a new Token object and return it. 
If in_tag is True, we are processing something that matched a tag, otherwise it should be treated as a literal string.", + "type": "method", + "file_path": "django\\django\\template\\base.py", + "ast_data": "FunctionDef name:create_token arg:self arg:token_string arg:position arg:lineno arg:in_tag arguments arg arg arg arg arg If Assign If Compare Assign Call If If Compare Return return:yes Call Assign If Compare Assign Return return:yes Call If Assign Call If Compare Return return:yes Call Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "draw_tex", + "source_code": "def draw_tex(self, gc, x, y, s, prop, angle, *, mtext=None):\n self._draw_text_as_path(gc, x, y, s, prop, angle, ismath='TeX')", + "docstring": "Draw a TeX instance. Parameters ---------- gc : The graphics context. x : float The x location of the text in display coords. y : float The y location of the text baseline in display coords. s : str The TeX text string. prop : The font properties. angle : float The rotation angle in degrees anti-clockwise. mtext : The original text object to be rendered.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:draw_tex arg:self arg:gc arg:x arg:y arg:s arg:prop arg:angle arguments arg arg arg arg arg arg arg arg Call" + }, + { + "library": "pytorch", + "name": "register_bytecode_hook", + "source_code": "def register_bytecode_hook(hook: BytecodeHook) -> RemovableHandle:\n handle = RemovableHandle(_bytecode_hooks)\n _bytecode_hooks[handle.id] = hook\n return handle", + "docstring": "Register hooks for bytecode generated by Dynamo. The hook can do some logging, as well as return a new code object to be used. Please refer to for the hook signature.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\convert_frame.py", + "ast_data": "FunctionDef name:register_bytecode_hook arg:hook arguments arg Assign Call Assign Return return:yes" + }, + { + "library": "kornia", + "name": "_to_uint8", + "source_code": "def _to_uint8(image: Tensor) -> Tensor:\n KORNIA_CHECK(image.dtype == torch.float32)\n return image.mul(255.0).byte()", + "docstring": "Convert an image tensor to uint8.", + "type": "function", + "file_path": "kornia\\kornia\\io\\io.py", + "ast_data": "FunctionDef name:_to_uint8 arg:image arguments arg Call Compare Return return:yes Call Call" + }, + { + "library": "django", + "name": "expected_parameters", + "source_code": "def expected_parameters(self):\n raise NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method')", + "docstring": "Return the list of parameter names that are expected from the request's query string and that will be used by this filter.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\filters.py", + "ast_data": "FunctionDef name:expected_parameters arg:self arguments arg Raise Call" + }, + { + "library": "cherrypy", + "name": "UTF8StreamEncoder", + "source_code": "class UTF8StreamEncoder:\n\n def __init__(self, iterator):\n self._iterator = iterator\n\n def __iter__(self):\n return self\n\n def next(self):\n return self.__next__()\n\n def __next__(self):\n res = next(self._iterator)\n if isinstance(res, str):\n res = res.encode('utf-8')\n return res\n\n def close(self):\n if is_closable_iterator(self._iterator):\n self._iterator.close()\n\n def __getattr__(self, attr):\n if attr.startswith('__'):\n raise AttributeError(self, attr)\n return getattr(self._iterator, attr)", + "docstring": 
"UTF8 Stream Encoder.", + "type": "class", + "file_path": "cherrypy\\cherrypy\\lib\\encoding.py", + "ast_data": "ClassDef name:UTF8StreamEncoder FunctionDef name:__init__ arg:self arg:iterator arguments arg arg Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:next arg:self arguments arg Return return:yes Call FunctionDef name:__next__ arg:self arguments arg Assign Call If Call Assign Call Return return:yes FunctionDef name:close arg:self arguments arg If Call Call FunctionDef name:__getattr__ arg:self arg:attr arguments arg arg If Call Raise Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_many", + "source_code": "def get_many(self, keys, version=None):\n d = {}\n for k in keys:\n val = self.get(k, self._missing_key, version=version)\n if val is not self._missing_key:\n d[k] = val\n return d", + "docstring": "Fetch a bunch of keys from the cache. For certain backends (memcached, pgsql) this can be *much* faster when fetching multiple values. Return a dict mapping each key in keys to its value. If the given key is missing, it will be missing from the response dict.", + "type": "method", + "file_path": "django\\django\\core\\cache\\backends\\base.py", + "ast_data": "FunctionDef name:get_many arg:self arg:keys arg:version arguments arg arg arg Assign For Assign Call If Compare Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_sum_prod_no_axis", + "source_code": "def _sum_prod_no_axis(x: Array, dtype: DType | None) -> Array:\n if dtype is not None:\n return x.clone() if dtype == x.dtype else x.to(dtype)\n if x.dtype in (torch.uint8, torch.int8, torch.int16, torch.int32):\n return x.to(torch.int64)\n return x.clone()", + "docstring": "Implements and . Works around", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\torch\\_aliases.py", + "ast_data": "FunctionDef name:_sum_prod_no_axis arg:x arg:dtype arguments arg arg If Compare Return return:yes Compare Call Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "row_starts", + "source_code": "def row_starts(self):\n return self._row_splits[:-1]", + "docstring": "Returns the start indices for rows in this row partition. These indices specify where the values for each row begin. is equal to . Returns: A 1-D integer Tensor with shape . The returned tensor is nonnegative, and is sorted in ascending order. . .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:row_starts arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_block_orth", + "source_code": "def _block_orth(self, p1, p2, p3):\n p1_shape = p1.shape.as_list()\n if p1_shape != p2.shape.as_list() or p1_shape != p3.shape.as_list():\n raise ValueError(f'The dimension of the matrices must be the same. Received p1.shape={p1.shape}, p2.shape={p2.shape} and p3.shape={p3.shape}.')\n n = p1_shape[0]\n eye = linalg_ops_impl.eye(n, dtype=self.dtype)\n kernel2x2x2 = {}\n\n def matmul(p1, p2, p3):\n return math_ops.matmul(math_ops.matmul(p1, p2), p3)\n\n def cast(i, p):\n return i * p + (1 - i) * (eye - p)\n for i in [0, 1]:\n for j in [0, 1]:\n for k in [0, 1]:\n kernel2x2x2[i, j, k] = matmul(cast(i, p1), cast(j, p2), cast(k, p3))\n return kernel2x2x2", + "docstring": "Construct a 3 x 3 kernel. Used to construct orthgonal kernel. Args: p1: A symmetric projection matrix. 
p2: A symmetric projection matrix. p3: A symmetric projection matrix. Returns: A 2 x 2 x 2 kernel. Raises: ValueError: If the dimensions of p1, p2 and p3 are different.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "FunctionDef name:_block_orth arg:self arg:p1 arg:p2 arg:p3 arguments arg arg arg arg Assign Call If BoolOp Compare Call Compare Call Raise Call Assign Assign Call Assign FunctionDef name:matmul arg:p1 arg:p2 arg:p3 arguments arg arg arg Return return:yes Call Call FunctionDef name:cast arg:i arg:p arguments arg arg Return return:yes For For For Assign Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "benchmark_example_value", + "source_code": "@staticmethod\ndef benchmark_example_value(node):\n if isinstance(node, ir.Layout):\n node = ir.Buffer(name='fake', layout=node)\n if isinstance(node, ir.BaseView):\n node = node.unwrap_view()\n return AlgorithmSelectorCache.generate_example_value(V.graph.sizevars.size_hints(node.get_size(), fallback=config.unbacked_symint_fallback), V.graph.sizevars.size_hints(node.get_stride(), fallback=config.unbacked_symint_fallback), node.get_device(), node.get_dtype(), node.layout.offset, V.graph.sizevars.size_hints(V.graph.get_allocation_size(node), fallback=config.unbacked_symint_fallback))", + "docstring": "Convert an ir.Buffer into a concrete torch.Tensor we can use for benchmarking.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py", + "ast_data": "FunctionDef name:benchmark_example_value arg:node arguments arg If Call Assign Call If Call Assign Call Return return:yes Call Call Call Call Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "stream", + "source_code": "def stream(stream: Optional['torch.cuda.Stream']) -> StreamContext:\n return StreamContext(stream)", + "docstring": "Wrap around the Context-manager StreamContext that selects a given stream. Arguments: stream (Stream): selected stream. This manager is a no-op if it's ``.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:stream arg:stream arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "acheck_password", + "source_code": "async def acheck_password(self, raw_password):\n\n async def setter(raw_password):\n self.set_password(raw_password)\n self._password = None\n await self.asave(update_fields=['password'])\n return await acheck_password(raw_password, self.password, setter)", + "docstring": "See check_password().", + "type": "method", + "file_path": "django\\django\\contrib\\auth\\base_user.py", + "ast_data": "AsyncFunctionDef name:acheck_password arg:self arg:raw_password arguments arg arg AsyncFunctionDef name:setter arg:raw_password arguments arg Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "load", + "source_code": "def load(self) -> RepresentativeDatasetMapping:\n repr_dataset_map = {}\n for signature_def_key, dataset_file in self.dataset_file_map.items():\n if dataset_file.HasField('tfrecord_file_path'):\n repr_dataset_map[signature_def_key] = self._load_tf_record(dataset_file.tfrecord_file_path)\n else:\n raise ValueError('Unsupported Representative Dataset filetype')\n return repr_dataset_map", + "docstring": "Loads the representative datasets. Returns: representative dataset mapping: A signature def key -> representative mapping. 
The loader loads for each path in and associates the loaded dataset to the corresponding signature def key.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py", + "ast_data": "FunctionDef name:load arg:self arguments arg Assign For Call If Call Assign Call Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_patch_dynamo_unsupported_functions", + "source_code": "@contextlib.contextmanager\ndef _patch_dynamo_unsupported_functions():\n import torch.jit\n jit_isinstance = torch.jit.isinstance\n torch.jit.isinstance = isinstance\n logger.info('Replaced torch.jit.isinstance with isinstance to allow dynamo tracing')\n try:\n yield\n finally:\n torch.jit.isinstance = jit_isinstance", + "docstring": "Patch PyTorch to bypass some functions torch.export.export does not support.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_capture_strategies.py", + "ast_data": "FunctionDef name:_patch_dynamo_unsupported_functions arguments Assign Assign Call Try Assign" + }, + { + "library": "pandas", + "name": "_cython_operation", + "source_code": "@final\ndef _cython_operation(self, kind: str, values, how: str, axis: AxisInt, min_count: int=-1, **kwargs) -> ArrayLike:\n assert kind in ['transform', 'aggregate']\n cy_op = WrappedCythonOp(kind=kind, how=how, has_dropped_na=self.has_dropped_na)\n return cy_op.cython_operation(values=values, axis=axis, min_count=min_count, comp_ids=self.ids, ngroups=self.ngroups, **kwargs)", + "docstring": "Returns the values of a cython operation.", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\ops.py", + "ast_data": "FunctionDef name:_cython_operation arg:self arg:kind arg:values arg:how arg:axis arg:min_count arguments arg arg arg arg arg arg arg Compare Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "_get_first_selected_col_from_model", + "source_code": "def _get_first_selected_col_from_model(klass_info):\n concrete_model = klass_info['model']._meta.concrete_model\n for select_index in klass_info['select_fields']:\n if self.select[select_index][0].target.model == concrete_model:\n return self.select[select_index][0]", + "docstring": "Find the first selected column from a model. If it doesn't exist, don't lock a model. 
select_fields is filled recursively, so it also contains fields from the parent models.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\compiler.py", + "ast_data": "FunctionDef name:_get_first_selected_col_from_model arg:klass_info arguments arg Assign For If Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "KerasOpDispatcher", + "source_code": "class KerasOpDispatcher(dispatch.GlobalOpDispatcher):\n\n def handle(self, op, args, kwargs):\n if any((isinstance(x, keras_tensor.KerasTensor) for x in nest.flatten([args, kwargs]))):\n return TFOpLambda(op)(*args, **kwargs)\n else:\n return self.NOT_SUPPORTED", + "docstring": "A global dispatcher that allows building a functional model with TF Ops.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py", + "ast_data": "ClassDef name:KerasOpDispatcher FunctionDef name:handle arg:self arg:op arg:args arg:kwargs arguments arg arg arg arg If Call Call Call Return return:yes Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "apply_input_props_using_example", + "source_code": "def apply_input_props_using_example(graph: Graph, example_input: list[Any]) -> None:\n graph_inputs = list(graph.inputs())\n if len(graph_inputs) == 0:\n return\n in_0 = graph_inputs[0]\n if isinstance(in_0.type(), torch._C.ClassType) and in_0.debugName() == 'self':\n graph_inputs = graph_inputs[1:]\n if not len(graph_inputs) == len(example_input):\n raise RuntimeError('Number of inputs in graph does not match number of inputs in the example')\n for i, (graph_i, example_i) in enumerate(zip(graph_inputs, example_input)):\n if example_i is None:\n continue\n if isinstance(example_i, torch.Tensor) != isinstance(graph_i.type(), TensorType):\n raise RuntimeError(f'Input {i} does not match type of example', graph_i, example_i)\n if isinstance(example_i, torch.Tensor):\n graph_i.setType(TensorType.create_from_tensor(example_i))", + "docstring": "Applies properties for each tensor in the graph inputs using the example supplied.", + "type": "function", + "file_path": "pytorch\\torch\\jit\\_passes\\_property_propagation.py", + "ast_data": "FunctionDef name:apply_input_props_using_example arg:graph arg:example_input arguments arg arg Assign Call Call If Compare Call Return return:no Assign If BoolOp Call Call Compare Call Assign If Compare Call Call Raise Call For Call Call If Compare If Compare Call Call Call Raise Call If Call Call Call" + }, + { + "library": "tensorflow", + "name": "_copy_assets", + "source_code": "def _copy_assets(src_path: str, dst_path: str) -> None:\n for assets_dir_name in [_ASSETS_DIR, _ASSETS_EXTRA_DIR]:\n src_assets_path = file_io.join(src_path, assets_dir_name)\n if not file_io.file_exists_v2(src_assets_path):\n continue\n dst_assets_path = file_io.join(dst_path, assets_dir_name)\n file_io.create_dir_v2(dst_assets_path)\n for curr_dir, _, files in file_io.walk_v2(src_assets_path):\n for asset_file_name in files:\n src_asset_file = file_io.join(curr_dir, asset_file_name)\n curr_dst_dir = curr_dir.replace(src_assets_path, dst_assets_path)\n dst_asset_file = file_io.join(curr_dst_dir, asset_file_name)\n file_io.copy_v2(src_asset_file, dst_asset_file)\n logging.info('Copied asset file: %s -> %s', src_asset_file, dst_asset_file)", + "docstring": "Copies the assets directory of the saved model. Clones the contents of the assets/ directory from the source saved model directory to the destination saved model directory. 
Nothing will be copied if there are no assets directory in the source directory. Args: src_path: Source saved model directory. dst_path: Destination saved model directory. This directory must exist.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py", + "ast_data": "FunctionDef name:_copy_assets arg:src_path arg:dst_path arguments arg arg For Assign Call If Call Assign Call Call For Call For Assign Call Assign Call Assign Call Call Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n X = validate_data(self, X, accept_sparse='csr', dtype=np.float64)\n self._check_parameters(X.shape[0])\n self._fit(X)\n return self", + "docstring": "Create a biclustering for X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object SpectralBiclustering instance.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cluster\\_bicluster.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "direction", + "source_code": "@property\ndef direction(self) -> Tensor:\n return self._direction", + "docstring": "Return the line direction vector.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\line.py", + "ast_data": "FunctionDef name:direction arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "apply", + "source_code": "def apply(self, X):\n self._check_initialized()\n X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)\n n_estimators, n_classes = self.estimators_.shape\n leaves = np.zeros((X.shape[0], n_estimators, n_classes))\n for i in range(n_estimators):\n for j in range(n_classes):\n estimator = self.estimators_[i, j]\n leaves[:, i, j] = estimator.apply(X, check_input=False)\n return leaves", + "docstring": "Apply trees in the ensemble to X, return leaf indices. .. versionadded:: 0.17 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``. Returns ------- X_leaves : array-like of shape (n_samples, n_estimators, n_classes) For each datapoint x in X and for each tree in the ensemble, return the index of the leaf x ends up in each estimator. 
In the case of binary classification n_classes is 1.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", + "ast_data": "FunctionDef name:apply arg:self arg:X arguments arg arg Call Assign Call Assign Assign Call For Call For Call Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "support", + "source_code": "@property\ndef support(self) -> Optional[constraints.Constraint]:\n raise NotImplementedError", + "docstring": "Returns a :class: object representing this distribution's support.", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\distribution.py", + "ast_data": "FunctionDef name:support arg:self arguments arg Raise" + }, + { + "library": "pytorch", + "name": "main", + "source_code": "def main(self, log_path, other_datasets, nrows, heuristic_name, save_dot=False, ranking=False):\n df, choices, cat_feature2cats, dummy_col_2_col_val, metadata = self.get_df(log_path, nrows=nrows, apply_filters=True)\n df_train, df_val, df_test, feature_columns = self.custom_train_test_split(df)\n datasets = {'train': df_train, 'val': df_val, 'test': df_test}\n self.add_real_datasets(datasets, other_datasets, cat_feature2cats)\n max_depths = [5, 6, 7]\n min_samples_leafs = [1, 2, 5, 10]\n choice_columns = [f'{CHOICE_COL}_{choice}' for choice in choices]\n results_df, best_model, threshold = self.train_and_evaluate_models(datasets, feature_columns, choice_columns, max_depths, min_samples_leafs)\n print(results_df.to_string())\n for set_name in results_df['dataset'].unique():\n dataset_results = results_df[results_df['dataset'] == set_name]\n dataset_results = dataset_results.sort_values(by='correct')\n print(dataset_results.to_string() + '\\n')\n feature_names = feature_columns + choice_columns\n self.dt_to_python(best_model, metadata, feature_names, dummy_col_2_col_val, heuristic_name, threshold)", + "docstring": "Main function that trains a decision tree and generates a heuristic.", + "type": "method", + "file_path": "pytorch\\torchgen\\_autoheuristic\\train_regression.py", + "ast_data": "FunctionDef name:main arg:self arg:log_path arg:other_datasets arg:nrows arg:heuristic_name arg:save_dot arg:ranking arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Assign Assign Assign Call Call Call For Call Assign Compare Assign Call Call Call Assign Call" + }, + { + "library": "matplotlib", + "name": "isAvailable", + "source_code": "@classmethod\ndef isAvailable(cls):\n return shutil.which(cls.bin_path()) is not None", + "docstring": "Return whether a MovieWriter subclass is actually available.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\animation.py", + "ast_data": "FunctionDef name:isAvailable arg:cls arguments arg Return return:yes Compare Call Call" + }, + { + "library": "pytorch", + "name": "_make_reduction_prim", + "source_code": "def _make_reduction_prim(name: str, impl_aten, doc):\n return _make_prim(schema=f'{name}(Tensor inp, int[]? dims, *, ScalarType? 
output_dtype=None) -> Tensor', meta=_reduction_meta, impl_aten=impl_aten, return_type=RETURN_TYPE.NEW, doc=doc)", + "docstring": "Creates a reduction prim.", + "type": "function", + "file_path": "pytorch\\torch\\_prims\\__init__.py", + "ast_data": "FunctionDef name:_make_reduction_prim arg:name arg:impl_aten arg:doc arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "share_memory_", + "source_code": "@_share_memory_lock_protected\ndef share_memory_(self, *args, **kwargs):\n return super().share_memory_(*args, **kwargs)", + "docstring": "Moves the storage to shared memory. This is a no-op for storages already in shared memory and for CUDA storages, which do not need to be moved for sharing across processes. Storages in shared memory cannot be resized. Note that to mitigate issues like _ it is thread safe to call this function from multiple threads on the same object. It is NOT thread safe though to call any other function on self without proper synchronization. Please see :doc: for more details. .. note:: When all references to a storage in shared memory are deleted, the associated shared memory object will also be deleted. PyTorch has a special cleanup process to ensure that this happens even if the current process exits unexpectedly. It is worth noting the difference between :meth: and :meth: with `shm_open(3) from_fileopen(2) mmap(2) call `", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:share_memory_ arg:self arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "register", + "source_code": "def register(self, name, overwrite=False, **kwargs):\n self._registry[name] = (overwrite, kwargs)\n return self.create_client(name)", + "docstring": "Registers a new remote application. :param name: Name of the remote application. :param overwrite: Overwrite existing config with framework settings. :param kwargs: Parameters for :class:. Find parameters for the given remote app class. When a remote app is registered, it can be accessed with *named* attribute:: oauth.register('twitter', client_id='', ...) oauth.twitter.get('timeline')", + "type": "method", + "file_path": "authlib\\authlib\\integrations\\base_client\\registry.py", + "ast_data": "FunctionDef name:register arg:self arg:name arg:overwrite arguments arg arg arg arg Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "ModelAnalyzer", + "source_code": "@_tf_export('lite.experimental.Analyzer')\nclass ModelAnalyzer:\n\n @staticmethod\n def analyze(model_path=None, model_content=None, gpu_compatibility=False, **kwargs):\n if not model_path and (not model_content):\n raise ValueError('neither `model_path` nor `model_content` is provided')\n if model_path:\n print(f'=== {model_path} ===\\n')\n tflite_model = model_path\n input_is_filepath = True\n else:\n print('=== TFLite ModelAnalyzer ===\\n')\n tflite_model = model_content\n input_is_filepath = False\n if kwargs.get('experimental_use_mlir', False):\n print(wrap_converter.wrapped_flat_buffer_file_to_mlir(tflite_model, input_is_filepath))\n else:\n print(_analyzer_wrapper.ModelAnalyzer(tflite_model, input_is_filepath, gpu_compatibility))", + "docstring": "Provides a collection of TFLite model analyzer tools. 
Example: WARNING: Experimental interface, subject to change.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\lite\\python\\analyzer.py", + "ast_data": "ClassDef name:ModelAnalyzer FunctionDef name:analyze arg:model_path arg:model_content arg:gpu_compatibility arguments arg arg arg arg If BoolOp Raise Call If Call Assign Assign Call Assign Assign If Call Call Call Call Call Call" + }, + { + "library": "scrapy", + "name": "connectionLost", + "source_code": "def connectionLost(self, reason: Failure=connectionDone) -> None:\n self.setTimeout(None)\n if not reason.check(connectionDone):\n self._conn_lost_errors.append(reason)\n self._conn_lost_deferred.callback(self._conn_lost_errors)\n for stream in self.streams.values():\n if stream.metadata['request_sent']:\n close_reason = StreamCloseReason.CONNECTION_LOST\n else:\n close_reason = StreamCloseReason.INACTIVE\n stream.close(close_reason, self._conn_lost_errors, from_protocol=True)\n self.metadata['active_streams'] -= len(self.streams)\n self.streams.clear()\n self._pending_request_stream_pool.clear()\n self.conn.close_connection()", + "docstring": "Called by Twisted when the transport connection is lost. No need to write anything to transport here.", + "type": "method", + "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py", + "ast_data": "FunctionDef name:connectionLost arg:self arg:reason arguments arg arg Call If Call Call Call For Call If Assign Assign Call Call Call Call Call" + }, + { + "library": "sphinx", + "name": "add_transform", + "source_code": "def add_transform(self, transform: type[Transform]) -> None:\n self.registry.add_transform(transform)", + "docstring": "Register a Docutils transform to be applied after parsing. Add the standard docutils :class: subclass *transform* to the list of transforms that are applied after Sphinx parses a reST document. :param transform: A transform class .. list-table:: priority range categories for Sphinx transforms :widths: 20,80 * - Priority - Main purpose in Sphinx * - 0-99 - Fix invalid nodes by docutils. Translate a doctree. * - 100-299 - Preparation * - 300-399 - early * - 400-699 - main * - 700-799 - Post processing. Deadline to modify text and referencing. * - 800-899 - Collect referencing and referenced nodes. Domain processing. * - 900-999 - Finalize and clean up. 
refs: __ __", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:add_transform arg:self arg:transform arguments arg arg Call" + }, + { + "library": "pytorch", + "name": "evaluate_symexpr", + "source_code": "def evaluate_symexpr(self, code: str) -> Union[int, float, bool]:\n args = {str(e): val for e, val in self.var_to_val.items()}\n return eval(code, SYMPY_INTERP, args)", + "docstring": "To be used by compile_fx to evaluate symexprs", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:evaluate_symexpr arg:self arg:code arguments arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "sample_padded_inliers", + "source_code": "def sample_padded_inliers(xsamples: Tensor, ysamples: Tensor, inlier_counts: Tensor, inl_ransidx: Tensor, inl_sampleidx: Tensor, numransacs: int, dv: torch.device) -> Tuple[Tensor, Tensor]:\n maxinliers = int(torch.max(inlier_counts).item())\n dtype = xsamples.dtype\n padded_inlier_x = torch.zeros(size=(numransacs, maxinliers, 2), device=dv, dtype=dtype)\n padded_inlier_y = torch.zeros(size=(numransacs, maxinliers, 2), device=dv, dtype=dtype)\n padded_inlier_x[inl_ransidx, piecewise_arange(inl_ransidx)] = xsamples[inl_sampleidx]\n padded_inlier_y[inl_ransidx, piecewise_arange(inl_ransidx)] = ysamples[inl_sampleidx]\n return (padded_inlier_x, padded_inlier_y)", + "docstring": "Sample from padded inliers.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\adalam\\ransac.py", + "ast_data": "FunctionDef name:sample_padded_inliers arg:xsamples arg:ysamples arg:inlier_counts arg:inl_ransidx arg:inl_sampleidx arg:numransacs arg:dv arguments arg arg arg arg arg arg arg Assign Call Call Call Assign Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "check_and_replace", + "source_code": "def check_and_replace(inp: str, src: str, dst: str) -> str:\n if src not in inp:\n raise RuntimeError(f\"Can't find ${src} in the input\")\n return inp.replace(src, dst)", + "docstring": "Checks that can be found in and replaces it with", + "type": "function", + "file_path": "pytorch\\.github\\scripts\\build_triton_wheel.py", + "ast_data": "FunctionDef name:check_and_replace arg:inp arg:src arg:dst arguments arg arg arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "inverse_pinhole_matrix", + "source_code": "def inverse_pinhole_matrix(pinhole: Tensor, eps: float=1e-06) -> Tensor:\n if not (len(pinhole.shape) == 2 and pinhole.shape[1] == 12):\n raise AssertionError(pinhole.shape)\n fx, fy, cx, cy = torch.chunk(pinhole[..., :4], 4, dim=1)\n k = eye(4, device=pinhole.device, dtype=pinhole.dtype)\n k = k.view(1, 4, 4).repeat(pinhole.shape[0], 1, 1)\n k[..., 0, 0:1] = 1.0 / (fx + eps)\n k[..., 1, 1:2] = 1.0 / (fy + eps)\n k[..., 0, 2:3] = -1.0 * cx / (fx + eps)\n k[..., 1, 2:3] = -1.0 * cy / (fy + eps)\n return k", + "docstring": "Return the inverted pinhole matrix from a pinhole model. .. note:: This method is going to be deprecated in version 0.2 in favour of :attr:. Args: pinhole: tensor with pinhole models. eps: epsilon for numerical stability. Returns: tensor of inverted pinhole matrices. 
Shape: - Input: :math: - Output: :math: Example: >>> rng = torch.manual_seed(0) >>> pinhole = torch.rand(1, 12) # Nx12 >>> inverse_pinhole_matrix(pinhole) # Nx4x4 tensor([[[ 2.0151, 0.0000, -0.1783, 0.0000], [ 0.0000, 1.3017, -0.1719, 0.0000], [ 0.0000, 0.0000, 1.0000, 0.0000], [ 0.0000, 0.0000, 0.0000, 1.0000]]])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", + "ast_data": "FunctionDef name:inverse_pinhole_matrix arg:pinhole arg:eps arguments arg arg If BoolOp Compare Call Compare Raise Call Assign Call Assign Call Assign Call Call Assign Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_event_reduce_dims", + "source_code": "def _get_event_reduce_dims(self, min_event_ndims, event_ndims):\n event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)\n if event_ndims_ is not None:\n return [-index for index in range(1, event_ndims_ - min_event_ndims + 1)]\n else:\n reduce_ndims = event_ndims - min_event_ndims\n return math_ops.range(-reduce_ndims, 0)", + "docstring": "Compute the reduction dimensions given event_ndims.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py", + "ast_data": "FunctionDef name:_get_event_reduce_dims arg:self arg:min_event_ndims arg:event_ndims arguments arg arg arg Assign Call If Compare Return return:yes Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "variable_shape", + "source_code": "@property\ndef variable_shape(self):\n return tensor_shape.TensorShape([self.dimension])", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:variable_shape arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "forward", + "source_code": "def forward(self):\n self._pos = min(self._pos + 1, len(self._elements) - 1)\n return self()", + "docstring": "Move the position forward and return the current element.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:forward arg:self arguments arg Assign Call Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "_str_term_unicode", + "source_code": "@classmethod\ndef _str_term_unicode(cls, i, arg_str):\n if cls.basis_name is None:\n raise NotImplementedError('Subclasses must define either a basis_name, or override _str_term_unicode(cls, i, arg_str)')\n return f'·{cls.basis_name}{i.translate(cls._subscript_mapping)}({arg_str})'", + "docstring": "String representation of single polynomial term using unicode characters for superscripts and subscripts.", + "type": "method", + "file_path": "numpy\\numpy\\polynomial\\_polybase.py", + "ast_data": "FunctionDef name:_str_term_unicode arg:cls arg:i arg:arg_str arguments arg arg arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, feature_columns, weight_collections=None, trainable=True, cols_to_vars=None, name='feature_column_input_layer', create_scope_now=True):\n self._feature_columns = feature_columns\n self._weight_collections = weight_collections\n self._trainable = trainable\n self._cols_to_vars = cols_to_vars\n self._name = name\n self._input_layer_template = template.make_template(self._name, _internal_input_layer, create_scope_now_=create_scope_now)\n self._scope = 
self._input_layer_template.variable_scope", + "docstring": "See .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:feature_columns arg:weight_collections arg:trainable arg:cols_to_vars arg:name arg:create_scope_now arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Call Assign" + }, + { + "library": "scipy", + "name": "Quadratic", + "source_code": "class Quadratic(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n self.custom_bounds = [(0, 1), (0, 1)]\n self.global_optimum = [[0.19388, 0.48513]]\n self.fglob = -3873.72418\n\n def fun(self, x, *args):\n self.nfev += 1\n return -3803.84 - 138.08 * x[0] - 232.92 * x[1] + 128.08 * x[0] ** 2.0 + 203.64 * x[1] ** 2.0 + 182.25 * x[0] * x[1]", + "docstring": "Quadratic objective function. This class defines the Quadratic [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Quadratic}}(x) = -3803.84 - 138.08x_1 - 232.92x_2 + 128.08x_1^2 + 203.64x_2^2 + 182.25x_1x_2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_Q.py", + "ast_data": "ClassDef name:Quadratic Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes" + }, + { + "library": "scrapy", + "name": "get_meta_refresh", + "source_code": "def get_meta_refresh(response: TextResponse, ignore_tags: Iterable[str]=('script', 'noscript')) -> tuple[None, None] | tuple[float, str]:\n if response not in _metaref_cache:\n text = response.text[0:4096]\n _metaref_cache[response] = html.get_meta_refresh(text, response.url, response.encoding, ignore_tags=ignore_tags)\n return _metaref_cache[response]", + "docstring": "Parse the http-equiv refresh parameter from the given response", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\response.py", + "ast_data": "FunctionDef name:get_meta_refresh arg:response arg:ignore_tags arguments arg arg If Compare Assign Assign Call Return return:yes" + }, + { + "library": "django", + "name": "value", + "source_code": "@property\ndef value(self):\n return self.as_string()", + "docstring": "Return the value of this Field.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\field.py", + "ast_data": "FunctionDef name:value arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_joint_probabilities_nn", + "source_code": "def _joint_probabilities_nn(distances, desired_perplexity, verbose):\n t0 = time()\n distances.sort_indices()\n n_samples = distances.shape[0]\n distances_data = distances.data.reshape(n_samples, -1)\n distances_data = distances_data.astype(np.float32, copy=False)\n conditional_P = _utils._binary_search_perplexity(distances_data, desired_perplexity, verbose)\n assert np.all(np.isfinite(conditional_P)), 'All probabilities should be finite'\n P = 
csr_matrix((conditional_P.ravel(), distances.indices, distances.indptr), shape=(n_samples, n_samples))\n P = P + P.T\n sum_P = np.maximum(P.sum(), MACHINE_EPSILON)\n P /= sum_P\n assert np.all(np.abs(P.data) <= 1.0)\n if verbose >= 2:\n duration = time() - t0\n print('[t-SNE] Computed conditional probabilities in {:.3f}s'.format(duration))\n return P", + "docstring": "Compute joint probabilities p_ij from distances using just nearest neighbors. This method is approximately equal to _joint_probabilities. The latter is O(N), but limiting the joint probability to nearest neighbors improves this substantially to O(uN). Parameters ---------- distances : sparse matrix of shape (n_samples, n_samples) Distances of samples to its n_neighbors nearest neighbors. All other distances are left to zero (and are not materialized in memory). Matrix should be of CSR format. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : sparse matrix of shape (n_samples, n_samples) Condensed joint probability matrix with only nearest neighbors. Matrix will be of CSR format.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\manifold\\_t_sne.py", + "ast_data": "FunctionDef name:_joint_probabilities_nn arg:distances arg:desired_perplexity arg:verbose arguments arg arg arg Assign Call Call Assign Assign Call Assign Call Assign Call Call Call Assign Call Call Assign Assign Call Call Call Compare Call If Compare Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "Profile", + "source_code": "@tf_export('profiler.experimental.Profile', v1=[])\nclass Profile(object):\n\n def __init__(self, logdir, options=None):\n self._logdir = logdir\n self._options = options\n\n def __enter__(self):\n start(self._logdir, self._options)\n\n def __exit__(self, typ, value, tb):\n stop()", + "docstring": "Context-manager profile API. Profiling will start when entering the scope, and stop and save the results to the logdir when exits the scope. Open TensorBoard profile tab to view results. Example usage:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\profiler_v2.py", + "ast_data": "ClassDef name:Profile FunctionDef name:__init__ arg:self arg:logdir arg:options arguments arg arg arg Assign Assign FunctionDef name:__enter__ arg:self arguments arg Call FunctionDef name:__exit__ arg:self arg:typ arg:value arg:tb arguments arg arg arg arg Call Call" + }, + { + "library": "pytorch", + "name": "bf16_compress_hook", + "source_code": "def bf16_compress_hook(process_group: dist.ProcessGroup, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:\n return _compress_hook(torch.bfloat16, process_group, bucket)", + "docstring": "Warning: This API is experimental, and it requires NCCL version later than 2.9.6. This DDP communication hook implements a simple gradient compression approach that casts `Brain floating point format `). 
Example:: >>> # xdoctest: +SKIP >>> ddp_model.register_comm_hook(process_group, bf16_compress_hook)", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\default_hooks.py", + "ast_data": "FunctionDef name:bf16_compress_hook arg:process_group arg:bucket arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "QueueClosedError", + "source_code": "class QueueClosedError(Exception):\n pass", + "docstring": "Raised when CloseableQueue.put() fails because the queue is closed.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py", + "ast_data": "ClassDef name:QueueClosedError" + }, + { + "library": "pytorch", + "name": "load", + "source_code": "@classmethod\ndef load(cls, source_code: str, dst_file_ext: str) -> tuple[DLLWrapper, str, str]:\n if dst_file_ext != 'so':\n raise RuntimeError(f'Only support loading a .so file for now. Requested file extension: {dst_file_ext}. Source code: {source_code}')\n dst_file_path, hash_key, source_code_path = cls.compile(source_code, dst_file_ext)\n return (DLLWrapper(dst_file_path), hash_key, source_code_path)", + "docstring": "Compiles source code and loads the generated .so file. Returns a tuple of DLLWrapper, hash_key, source_code_path", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codecache.py", + "ast_data": "FunctionDef name:load arg:cls arg:source_code arg:dst_file_ext arguments arg arg arg If Compare Raise Call Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_hatch", + "source_code": "def get_hatch(self):\n return self._hatch", + "docstring": "Return the current hatching pattern.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:get_hatch arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "feature_names_in_", + "source_code": "@property\ndef feature_names_in_(self):\n return self.transformer_list[0][1].feature_names_in_", + "docstring": "Names of features seen during :term:.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:feature_names_in_ arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "get_float4_shape", + "source_code": "def get_float4_shape(tensor: torch.Tensor) -> tuple[int, ...]:\n assert tensor.dtype == torch.float4_e2m1fn_x2\n return (*tensor.shape, 2)", + "docstring": "Get the shape of an unpacked float4 tensor. The float4_e2m1fn_x2 type is a shell type described in the shell dtype is takes up 1 byte per element and semantically represents two fp4 values packed into 1 byte. Semantically it represents (*tensor.shape, 2) fp4 elements.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_type_casting.py", + "ast_data": "FunctionDef name:get_float4_shape arg:tensor arguments arg Compare Return return:yes" + }, + { + "library": "pytorch", + "name": "is_initialized", + "source_code": "def is_initialized() -> bool:\n return True", + "docstring": "Returns True if the CPU is initialized. Always True. N.B. 
This function only exists to facilitate device-agnostic code", + "type": "function", + "file_path": "pytorch\\torch\\cpu\\__init__.py", + "ast_data": "FunctionDef name:is_initialized arguments Return return:yes" + }, + { + "library": "pandas", + "name": "_new_DatetimeIndex", + "source_code": "def _new_DatetimeIndex(cls, d):\n if 'data' in d and (not isinstance(d['data'], DatetimeIndex)):\n data = d.pop('data')\n if not isinstance(data, DatetimeArray):\n tz = d.pop('tz')\n freq = d.pop('freq')\n dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq)\n else:\n dta = data\n for key in ['tz', 'freq']:\n if key in d:\n assert d[key] == getattr(dta, key)\n d.pop(key)\n result = cls._simple_new(dta, **d)\n else:\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n result = cls.__new__(cls, **d)\n return result", + "docstring": "This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexes\\datetimes.py", + "ast_data": "FunctionDef name:_new_DatetimeIndex arg:cls arg:d arguments arg arg If BoolOp Compare Call Assign Call If Call Assign Call Assign Call Assign Call Call Assign For If Compare Compare Call Call Assign Call With Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_transform_feature", + "source_code": "def _transform_feature(self, inputs):\n id_weight_pair = self.categorical_column._get_sparse_tensors(inputs)\n id_tensor = id_weight_pair.id_tensor\n weight_tensor = id_weight_pair.weight_tensor\n if weight_tensor is not None:\n weighted_column = sparse_ops.sparse_merge(sp_ids=id_tensor, sp_values=weight_tensor, vocab_size=int(self._variable_shape[-1]))\n weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0], weighted_column.dense_shape)\n return array_ops.scatter_nd(weighted_column.indices, weighted_column.values, weighted_column.dense_shape)\n dense_id_tensor = sparse_ops.sparse_tensor_to_dense(id_tensor, default_value=-1)\n one_hot_id_tensor = array_ops.one_hot(dense_id_tensor, depth=self._variable_shape[-1], on_value=1.0, off_value=0.0)\n return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])", + "docstring": "Returns dense representing feature. Args: inputs: A object to access inputs. Returns: Transformed feature . 
Raises: ValueError: if input rank is not known at graph building time.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_transform_feature arg:self arg:inputs arguments arg arg Assign Call Assign Assign If Compare Assign Call Call Assign Call Return return:yes Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "InputLayerSavedModelSaver", + "source_code": "class InputLayerSavedModelSaver(base_serialization.SavedModelSaver):\n\n @property\n def object_identifier(self):\n return constants.INPUT_LAYER_IDENTIFIER\n\n @property\n def python_properties(self):\n return dict(class_name=type(self.obj).__name__, name=self.obj.name, dtype=self.obj.dtype, sparse=self.obj.sparse, ragged=self.obj.ragged, batch_input_shape=self.obj._batch_input_shape, config=self.obj.get_config())\n\n def objects_to_serialize(self, serialization_cache):\n return {}\n\n def functions_to_serialize(self, serialization_cache):\n return {}", + "docstring": "InputLayer serialization.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\layer_serialization.py", + "ast_data": "ClassDef name:InputLayerSavedModelSaver FunctionDef name:object_identifier arg:self arguments arg Return return:yes FunctionDef name:python_properties arg:self arguments arg Return return:yes Call Call Call FunctionDef name:objects_to_serialize arg:self arg:serialization_cache arguments arg arg Return return:no FunctionDef name:functions_to_serialize arg:self arg:serialization_cache arguments arg arg Return return:no" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, node_def, op, message, *args):\n super(UnauthenticatedError, self).__init__(node_def, op, message, UNAUTHENTICATED, *args)", + "docstring": "Creates an .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call" + }, + { + "library": "scikit-learn", + "name": "inplace_identity", + "source_code": "def inplace_identity(X):\n pass", + "docstring": "Simply leave the input array unchanged. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Data, where is the number of samples and is the number of features.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py", + "ast_data": "FunctionDef name:inplace_identity arg:X arguments arg" + }, + { + "library": "tensorflow", + "name": "SmartBroadcastGradientArgs", + "source_code": "def SmartBroadcastGradientArgs(x, y, grad=None):\n del grad\n x_shape = array_ops.shape(x)\n y_shape = array_ops.shape(y)\n if not context.executing_eagerly() and isinstance(x, tensor.Tensor) and isinstance(y, tensor.Tensor):\n x_axes, y_axes = _InferGradientReductionAxes(x.shape, y.shape)\n else:\n x_axes, y_axes = (None, None)\n if x_axes is None or y_axes is None:\n x_axes, y_axes = gen_array_ops.broadcast_gradient_args(x_shape, y_shape)\n x_must_reduce = True\n y_must_reduce = True\n else:\n x_must_reduce = x_axes or x.shape.rank < y.shape.rank\n y_must_reduce = y_axes or y.shape.rank < x.shape.rank\n return ((x_shape, x_axes, x_must_reduce), (y_shape, y_axes, y_must_reduce))", + "docstring": "Version of optimized for partially-known shapes. Args: x: The first argument of a broadcasting binary op. 
y: The second argument of a broadcasting binary op. grad: Deprecated. Returns: A pair of triples, one per argument with * Shape of the argument (tensor); * Reduction axes for the argument (list or tensor); * Boolean indicating whether the reduction must be applied.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:SmartBroadcastGradientArgs arg:x arg:y arg:grad arguments arg arg arg Assign Call Assign Call If BoolOp Call Call Call Assign Call Assign If BoolOp Compare Compare Assign Call Assign Assign Assign BoolOp Compare Assign BoolOp Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "debug_print", + "source_code": "def debug_print(self, node):\n if __debug__:\n print(pretty_printer.fmt(node))\n return node", + "docstring": "Helper method useful for debugging. Prints the AST.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transformer.py", + "ast_data": "FunctionDef name:debug_print arg:self arg:node arguments arg arg If Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_consolidate_input_dtype", + "source_code": "def _consolidate_input_dtype(self, computed_dtype: torch.dtype, result_dtype: torch.dtype) -> torch.dtype:\n if not self._USE_OPMATH and self.promotion_kind in (_prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, _prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT):\n return result_dtype\n return computed_dtype", + "docstring": "Although opmath is the right thing to do to retain on-par precision, it inserts upcasts everywhere in the graph. This is particularly hard for backend to optimize since there is no way to differentiate between inserted upcasts and model code casts. 
Hence we consolidate the input dtype to the result dtype to avoid this.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py", + "ast_data": "FunctionDef name:_consolidate_input_dtype arg:self arg:computed_dtype arg:result_dtype arguments arg arg arg If BoolOp Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_grad", + "source_code": "def _grad(op, grad):\n fft_length = op.inputs[1]\n fft_length_static = _tensor_util.constant_value(fft_length)\n if fft_length_static is not None:\n fft_length = fft_length_static\n real_dtype = grad.dtype\n if real_dtype == _dtypes.float32:\n complex_dtype = _dtypes.complex64\n elif real_dtype == _dtypes.float64:\n complex_dtype = _dtypes.complex128\n is_odd = _math_ops.mod(fft_length[-1], 2)\n input_last_dimension = _array_ops.shape(op.inputs[0])[-1]\n mask = _array_ops.concat([[1.0], 2.0 * _array_ops.ones([input_last_dimension - 2 + is_odd], real_dtype), _array_ops.ones([1 - is_odd], real_dtype)], 0)\n rsize = _math_ops.reciprocal(_math_ops.cast(_fft_size_for_grad(grad, rank), real_dtype))\n the_rfft = rfft_fn(grad, fft_length)\n return (the_rfft * _math_ops.cast(rsize * mask, complex_dtype), None)", + "docstring": "A gradient function for IRFFT with the provided and .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py", + "ast_data": "FunctionDef name:_grad arg:op arg:grad arguments arg arg Assign Assign Call If Compare Assign Assign If Compare Assign If Compare Assign Assign Call Assign Call Assign Call Call Call Assign Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_ABW", + "source_code": "class _ABW:\n\n def __init__(self):\n self.m = None\n self.n = None\n self.astart = None\n self.total = None\n self.freqs = None\n\n def _recalc(self, n, m):\n if n != self.n or m != self.m:\n self.n, self.m = (n, m)\n astart, a1, _ = gscale(n, m)\n self.astart = astart\n self.freqs = a1.astype(np.float64)\n self.total = self.freqs.sum()\n\n def pmf(self, k, n, m):\n self._recalc(n, m)\n ind = np.floor(k - self.astart).astype(int)\n return self.freqs[ind] / self.total\n\n def cdf(self, k, n, m):\n self._recalc(n, m)\n ind = np.ceil(k - self.astart).astype(int)\n return self.freqs[:ind + 1].sum() / self.total\n\n def sf(self, k, n, m):\n self._recalc(n, m)\n ind = np.floor(k - self.astart).astype(int)\n return self.freqs[ind:].sum() / self.total", + "docstring": "Distribution of Ansari-Bradley W-statistic under the null hypothesis.", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_morestats.py", + "ast_data": "ClassDef name:_ABW FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign Assign FunctionDef name:_recalc arg:self arg:n arg:m arguments arg arg arg If BoolOp Compare Compare Assign Assign Call Assign Assign Call Assign Call FunctionDef name:pmf arg:self arg:k arg:n arg:m arguments arg arg arg arg Call Assign Call Call Return return:yes FunctionDef name:cdf arg:self arg:k arg:n arg:m arguments arg arg arg arg Call Assign Call Call Return return:yes Call FunctionDef name:sf arg:self arg:k arg:n arg:m arguments arg arg arg arg Call Assign Call Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "require_sphinx", + "source_code": "@staticmethod\ndef require_sphinx(version: tuple[int, int] | str) -> None:\n if isinstance(version, tuple):\n major, minor = version\n else:\n major, minor = map(int, version.split('.')[:2])\n if (major, minor) > 
sphinx.version_info[:2]:\n req = f'{major}.{minor}'\n raise VersionRequirementError(req)", + "docstring": "Check the Sphinx version if requested. Compare *version* with the version of the running Sphinx, and abort the build when it is too old. :param version: The required version in the form of `` form.", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:require_sphinx arg:version arguments arg If Call Assign Assign Call Call If Compare Assign Raise Call" + }, + { + "library": "matplotlib", + "name": "_internal_update", + "source_code": "def _internal_update(self, kwargs):\n return self._update_props(kwargs, '{cls.__name__}.set() got an unexpected keyword argument {prop_name!r}')", + "docstring": "Update artist properties without prenormalizing them, but generating errors as if calling . The lack of prenormalization is to maintain backcompatibility.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:_internal_update arg:self arg:kwargs arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "ScopedTFBuffer", + "source_code": "class ScopedTFBuffer(object):\n __slots__ = ['buffer']\n\n def __init__(self, buf_string):\n self.buffer = c_api.TF_NewBufferFromString(compat.as_bytes(buf_string))\n\n def __del__(self):\n c_api.TF_DeleteBuffer(self.buffer)", + "docstring": "An internal class to help manage the TF_Buffer lifetime.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\c_api_util.py", + "ast_data": "ClassDef name:ScopedTFBuffer Assign FunctionDef name:__init__ arg:self arg:buf_string arguments arg arg Assign Call Call FunctionDef name:__del__ arg:self arguments arg Call" + }, + { + "library": "scipy", + "name": "bernoulli", + "source_code": "def bernoulli(n):\n if not isscalar(n) or n < 0:\n raise ValueError('n must be a non-negative integer.')\n n = int(n)\n if n < 2:\n n1 = 2\n else:\n n1 = n\n return _specfun.bernob(int(n1))[:n + 1]", + "docstring": "Bernoulli numbers B0..Bn (inclusive). Parameters ---------- n : int Indicated the number of terms in the Bernoulli series to generate. Returns ------- ndarray The Bernoulli numbers `bernoulli`.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:bernoulli arg:n arguments arg If BoolOp Call Compare Raise Call Assign Call If Compare Assign Assign Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_parse_rendezvous_config", + "source_code": "def _parse_rendezvous_config(config_str: str) -> dict[str, str]:\n config: dict[str, str] = {}\n config_str = config_str.strip()\n if not config_str:\n return config\n key_values = config_str.split(',')\n for kv in key_values:\n key, *values = kv.split('=', 1)\n key = key.strip()\n if not key:\n raise ValueError('The rendezvous configuration string must be in format =,...,=.')\n value: Optional[str]\n if values:\n value = values[0].strip()\n else:\n value = None\n if not value:\n raise ValueError(f\"The rendezvous configuration option '{key}' must have a value specified.\")\n config[key] = value\n return config", + "docstring": "Extract key-value pairs from a rendezvous configuration string. 
Args: config_str: A string in format =,...,=.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\utils.py", + "ast_data": "FunctionDef name:_parse_rendezvous_config arg:config_str arguments arg Assign Call If Return return:yes Assign Call For Assign Call Assign Call If Raise Call If Assign Call Assign If Raise Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "_a_ij_Aij_Dij2", + "source_code": "def _a_ij_Aij_Dij2(A):\n m, n = A.shape\n count = 0\n for i in range(m):\n for j in range(n):\n count += A[i, j] * (_Aij(A, i, j) - _Dij(A, i, j)) ** 2\n return count", + "docstring": "A term that appears in the ASE of Kendall's tau and Somers' D.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_pythran.py", + "ast_data": "FunctionDef name:_a_ij_Aij_Dij2 arg:A arguments arg Assign Assign For Call For Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "getY", + "source_code": "def getY(self, index):\n return self.getOrdinate(1, index)", + "docstring": "Get the Y value at the given index.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py", + "ast_data": "FunctionDef name:getY arg:self arg:index arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "experimental_logical_device", + "source_code": "@contextlib.contextmanager\ndef experimental_logical_device(self, logical_device_id):\n num_logical_devices_per_replica = self._tpu_devices.shape[1]\n if logical_device_id >= num_logical_devices_per_replica:\n raise ValueError('`logical_device_id` not in range (was {}, but there are only {} logical devices per replica).'.format(logical_device_id, num_logical_devices_per_replica))\n self._logical_device_stack.append(logical_device_id)\n try:\n if tpu_util.enclosing_tpu_context() is None:\n yield\n else:\n with ops.device(tpu.core(logical_device_id)):\n yield\n finally:\n self._logical_device_stack.pop()", + "docstring": "Places variables and ops on the specified logical device.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", + "ast_data": "FunctionDef name:experimental_logical_device arg:self arg:logical_device_id arguments arg arg Assign If Compare Raise Call Call Call Try If Compare Call With Call Call Call" + }, + { + "library": "scipy", + "name": "logpdf", + "source_code": "def logpdf(self, x, *args, **kwds):\n args, loc, scale = self._parse_args(*args, **kwds)\n x, loc, scale = map(asarray, (x, loc, scale))\n args = tuple(map(asarray, args))\n dtyp = np.promote_types(x.dtype, np.float64)\n x = np.asarray((x - loc) / scale, dtype=dtyp)\n cond0 = self._argcheck(*args) & (scale > 0)\n cond1 = self._support_mask(x, *args) & (scale > 0)\n cond = cond0 & cond1\n output = empty(shape(cond), dtyp)\n output.fill(-inf)\n putmask(output, 1 - cond0 + np.isnan(x), self.badvalue)\n if np.any(cond):\n goodargs = argsreduce(cond, *(x,) + args + (scale,))\n scale, goodargs = (goodargs[-1], goodargs[:-1])\n place(output, cond, self._logpdf(*goodargs) - log(scale))\n if output.ndim == 0:\n return output[()]\n return output", + "docstring": "Log of the probability density function at x of the given RV. This uses a more numerically accurate calculation if available. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... 
: array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- logpdf : array_like Log of the probability density function evaluated at x", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:logpdf arg:self arg:x arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Compare Assign Call Compare Assign Assign Call Call Call Call Call If Call Assign Call Assign Call Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "virtualenv", + "name": "reset", + "source_code": "def reset(self):\n pass", + "docstring": "This is a temporary folder, is already empty to start with.", + "type": "method", + "file_path": "virtualenv\\src\\virtualenv\\app_data\\via_tempdir.py", + "ast_data": "FunctionDef name:reset arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "Softshrink", + "source_code": "class Softshrink(Module):\n __constants__ = ['lambd']\n lambd: float\n\n def __init__(self, lambd: float=0.5) -> None:\n super().__init__()\n self.lambd = lambd\n\n def forward(self, input: Tensor) -> Tensor:\n return F.softshrink(input, self.lambd)\n\n def extra_repr(self) -> str:\n return str(self.lambd)", + "docstring": "Applies the soft shrinkage function element-wise. .. math:: \\text{SoftShrinkage}(x) = \\begin{cases} x - \\lambda, & \\text{ if } x > \\lambda \\\\ x + \\lambda, & \\text{ if } x >> m = nn.Softshrink() >>> input = torch.randn(2) >>> output = m(input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\activation.py", + "ast_data": "ClassDef name:Softshrink Assign FunctionDef name:__init__ arg:self arg:lambd arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "_get_file_path", + "source_code": "@classmethod\ndef _get_file_path(cls, model_name: str, cache_dir: Optional[str], suffix: Optional[str]=None) -> str:\n if cache_dir is None:\n cache_dir = kornia_config.hub_cache_dir\n if suffix is not None and (not model_name.endswith(suffix)):\n file_name = f'{os.path.split(model_name)[-1]}{suffix}'\n else:\n file_name = os.path.split(model_name)[-1]\n file_path = os.path.join(*cache_dir.split(os.sep), *model_name.split(os.sep)[:-1], file_name)\n return file_path", + "docstring": "Construct the file path for the ONNX model based on the model name and cache directory. Args: model_name: The name of the model or operator, typically in the format 'operators/model_name'. cache_dir: The directory where the model should be cached. Defaults to None, which will use a default directory. suffix: Optional file suffix when the filename is the model name. 
Returns: str: The full local path where the model should be stored or loaded from.", + "type": "method", + "file_path": "kornia\\kornia\\utils\\download.py", + "ast_data": "FunctionDef name:_get_file_path arg:cls arg:model_name arg:cache_dir arg:suffix arguments arg arg arg arg If Compare Assign If BoolOp Compare Call Assign Call Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_generator", + "source_code": "def _get_generator(device: torch.device) -> torch._C.Generator:\n idx = device.index\n if idx is None:\n idx = current_device()\n return torch.xpu.default_generators[idx]", + "docstring": "Return the XPU Generator object for the given device. Args: device (torch.device): selected device.", + "type": "function", + "file_path": "pytorch\\torch\\xpu\\__init__.py", + "ast_data": "FunctionDef name:_get_generator arg:device arguments arg Assign If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "lookup", + "source_code": "def lookup(self, keys, name=None):\n if keys.dtype.base_dtype != self._key_dtype:\n raise TypeError(f'Dtype of argument `keys` must be {self._key_dtype}, received: {keys.dtype}')\n values = keys\n if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)):\n values = keys.values\n if self._table and self._table.key_dtype.base_dtype == dtypes.int64:\n values = math_ops.cast(values, dtypes.int64)\n if self._num_oov_buckets == 0:\n ids = self._table.lookup(values, name=name)\n else:\n with ops.name_scope(name, '%s_Lookup' % self.name):\n str_to_hash_bucket = self._get_string_to_hash_bucket_fn(self._hasher_spec)\n buckets = str_to_hash_bucket(_as_string(values), num_buckets=self._num_oov_buckets, name='hash_bucket')\n if self._table:\n ids = self._table.lookup(values)\n buckets = math_ops.add(buckets, self._table.size())\n is_id_non_default = math_ops.not_equal(ids, self._table.default_value)\n ids = array_ops.where_v2(is_id_non_default, ids, buckets)\n else:\n ids = buckets\n if isinstance(keys, sparse_tensor.SparseTensor):\n return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)\n elif isinstance(keys, internal.RaggedTensor):\n return keys.with_values(ids)\n return ids", + "docstring": "Looks up in the table, outputs the corresponding values. It assigns out-of-vocabulary keys to buckets based in their hashes. Args: keys: Keys to look up. May be either a or dense . name: Optional name for the op. Returns: A if keys are sparse, a if keys are ragged, otherwise a dense . Raises: TypeError: when doesn't match the table key data type.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:lookup arg:self arg:keys arg:name arguments arg arg arg If Compare Raise Call Assign If Call Assign If BoolOp Compare Assign Call If Compare Assign Call With Call Assign Call Assign Call Call If Assign Call Assign Call Call Assign Call Assign Call Assign If Call Return return:yes Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "disable_fake_quant", + "source_code": "def disable_fake_quant(mod):\n if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod):\n mod.disable_fake_quant()", + "docstring": "Disable fake quantization for the module. Disable fake quantization for this module, if applicable. 
Example usage:: # model is any PyTorch model model.apply(torch.ao.quantization.disable_fake_quant)", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fake_quantize.py", + "ast_data": "FunctionDef name:disable_fake_quant arg:mod arguments arg If BoolOp Call Call Call" + }, + { + "library": "tensorflow", + "name": "_tpu_device_name", + "source_code": "def _tpu_device_name(job, task, device):\n if job is None:\n return '/task:%d/device:TPU:%d' % (task, device)\n else:\n return '/job:%s/task:%d/device:TPU:%d' % (job, task, device)", + "docstring": "Returns the device name for the TPU on of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py", + "ast_data": "FunctionDef name:_tpu_device_name arg:job arg:task arg:device arguments arg arg arg If Compare Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "apply_str", + "source_code": "def apply_str(self) -> DataFrame | Series:\n func = cast(str, self.func)\n obj = self.obj\n from pandas.core.groupby.generic import DataFrameGroupBy, SeriesGroupBy\n method = getattr(obj, func, None)\n if callable(method):\n sig = inspect.getfullargspec(method)\n arg_names = (*sig.args, *sig.kwonlyargs)\n if self.axis != 0 and ('axis' not in arg_names or func in ('corrwith', 'skew')):\n raise ValueError(f'Operation {func} does not support axis=1')\n if 'axis' in arg_names and (not isinstance(obj, (SeriesGroupBy, DataFrameGroupBy))):\n self.kwargs['axis'] = self.axis\n return self._apply_str(obj, func, *self.args, **self.kwargs)", + "docstring": "Compute apply in case of a string. Returns ------- result: Series or DataFrame", + "type": "method", + "file_path": "pandas\\pandas\\core\\apply.py", + "ast_data": "FunctionDef name:apply_str arg:self arguments arg Assign Call Assign Assign Call If Call Assign Call Assign If BoolOp Compare BoolOp Compare Compare Raise Call If BoolOp Compare Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "probs_to_logits", + "source_code": "def probs_to_logits(probs, is_binary=False):\n ps_clamped = clamp_probs(probs)\n if is_binary:\n return torch.log(ps_clamped) - torch.log1p(-ps_clamped)\n return torch.log(ps_clamped)", + "docstring": "Converts a tensor of probabilities into logits. For the binary case, this denotes the probability of occurrence of the event indexed by . 
For the multi-dimensional case, the values along the last dimension denote the probabilities of occurrence of each of the events.", + "type": "function", + "file_path": "pytorch\\torch\\distributions\\utils.py", + "ast_data": "FunctionDef name:probs_to_logits arg:probs arg:is_binary arguments arg arg Assign Call If Return return:yes Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "__init__", + "source_code": "def __init__(self, type_input):\n if isinstance(type_input, OGRGeomType):\n num = type_input.num\n elif isinstance(type_input, str):\n type_input = type_input.lower()\n if type_input == 'geometry':\n type_input = 'unknown'\n num = self._str_types.get(type_input)\n if num is None:\n raise GDALException('Invalid OGR String Type \"%s\"' % type_input)\n elif isinstance(type_input, int):\n if type_input not in self._types:\n raise GDALException('Invalid OGR Integer Type: %d' % type_input)\n num = type_input\n else:\n raise TypeError('Invalid OGR input type given.')\n self.num = num", + "docstring": "Figure out the correct OGR Type based upon the input.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geomtype.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:type_input arguments arg arg If Call Assign If Call Assign Call If Compare Assign Assign Call If Compare Raise Call If Call If Compare Raise Call Assign Raise Call Assign" + }, + { + "library": "tensorflow", + "name": "split", + "source_code": "@tf_should_use.should_use_result\ndef split(self, value, lengths, name=None):\n with ops.name_scope(name, 'TensorArraySplit', [self._handle, value, lengths]):\n value = ops.convert_to_tensor(value, dtype=self._dtype, name='value')\n with self._maybe_colocate_with(value):\n lengths_64 = math_ops.cast(lengths, dtypes.int64)\n if not context.executing_eagerly():\n clengths = tensor_util.constant_value(lengths_64)\n if value.shape.dims is not None and clengths is not None:\n if clengths.shape and clengths.max() == clengths.min():\n self._check_element_shape(tensor_shape.TensorShape([clengths[0]]).concatenate(value.shape[1:]))\n flow_out = gen_data_flow_ops.tensor_array_split_v3(handle=self._handle, value=value, lengths=lengths_64, flow_in=self._flow, name=name)\n return build_ta_with_new_flow(self, flow_out)", + "docstring": "See TensorArray.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:split arg:self arg:value arg:lengths arg:name arguments arg arg arg arg With Call Assign Call With Call Assign Call If Call Assign Call If BoolOp Compare Compare If BoolOp Compare Call Call Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "clean_up", + "source_code": "def clean_up(self):\n now = self.now()\n for fname in os.listdir(self.storage_path):\n have_session = fname.startswith(self.SESSION_PREFIX) and (not fname.endswith(self.LOCK_SUFFIX))\n if have_session:\n path = os.path.join(self.storage_path, fname)\n self.acquire_lock(path)\n if self.debug:\n cherrypy.log('Cleanup lock acquired.', 'TOOLS.SESSIONS')\n try:\n contents = self._load(path)\n if contents is not None:\n data, expiration_time = contents\n if expiration_time < now:\n os.unlink(path)\n finally:\n self.release_lock(path)", + "docstring": "Clean up expired sessions.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:clean_up arg:self arguments arg Assign Call For Call Assign BoolOp Call Call If Assign 
Call Call If Call Try Assign Call If Compare Assign If Compare Call Call" + }, + { + "library": "matplotlib", + "name": "_tex_escape", + "source_code": "def _tex_escape(text):\n return text.replace('−', '\\\\ensuremath{-}')", + "docstring": "Do some necessary and/or useful substitutions for texts to be included in LaTeX documents.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py", + "ast_data": "FunctionDef name:_tex_escape arg:text arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_sanitize_column", + "source_code": "def _sanitize_column(self, value) -> tuple[ArrayLike, BlockValuesRefs | None]:\n self._ensure_valid_index(value)\n assert not isinstance(value, DataFrame)\n if is_dict_like(value):\n if not isinstance(value, Series):\n value = Series(value)\n return _reindex_for_setitem(value, self.index)\n if is_list_like(value):\n com.require_length_match(value, self.index)\n return (sanitize_array(value, self.index, copy=True, allow_2d=True), None)", + "docstring": "Ensures new columns (which go into the BlockManager as new blocks) are always copied (or a reference is being tracked to them under CoW) and converted into an array. Parameters ---------- value : scalar, Series, or array-like Returns ------- tuple of numpy.ndarray or ExtensionArray and optional BlockValuesRefs", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:_sanitize_column arg:self arg:value arguments arg arg Call Call If Call If Call Assign Call Return return:yes Call If Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_assert_all_equal_and_return", + "source_code": "def _assert_all_equal_and_return(tensors, name=None):\n with ops.name_scope(name, 'assert_all_equal', values=tensors):\n if len(tensors) == 1:\n return tensors[0]\n assert_equal_ops = []\n for t in tensors[1:]:\n assert_equal_ops.append(check_ops.assert_equal(tensors[0], t))\n with ops.control_dependencies(assert_equal_ops):\n return array_ops.identity(tensors[0])", + "docstring": "Asserts that all tensors are equal and returns the first one.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py", + "ast_data": "FunctionDef name:_assert_all_equal_and_return arg:tensors arg:name arguments arg arg With Call If Compare Call Return return:yes Assign For Call Call With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_num_buckets", + "source_code": "@property\ndef _num_buckets(self):\n return len(self.vocabulary_list) + self.num_oov_buckets", + "docstring": "Returns number of buckets in this sparse feature.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_num_buckets arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "__getstate__", + "source_code": "def __getstate__(self):\n state = super().__getstate__()\n state.pop('f_', None)\n return state", + "docstring": "Pickle-protocol - return state of the estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\isotonic.py", + "ast_data": "FunctionDef name:__getstate__ arg:self arguments arg Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "patch_torch_manual_seed", + "source_code": "@functools.lru_cache(None)\ndef patch_torch_manual_seed():\n\n def deterministic_torch_manual_seed(*args, 
**kwargs):\n from torch._C import default_generator\n seed = 1337\n if HAS_CUDA:\n import torch.cuda\n if not torch.cuda._is_in_bad_fork():\n torch.cuda.manual_seed_all(seed)\n if HAS_XPU:\n import torch.xpu\n if not torch.xpu._is_in_bad_fork():\n torch.xpu.manual_seed_all(seed)\n return default_generator.manual_seed(seed)\n torch.manual_seed = deterministic_torch_manual_seed", + "docstring": "Make torch manual seed deterministic. Helps with accuracy testing.", + "type": "function", + "file_path": "pytorch\\benchmarks\\dynamo\\common.py", + "ast_data": "FunctionDef name:patch_torch_manual_seed arguments FunctionDef name:deterministic_torch_manual_seed arguments arg arg Assign If If Call Call If If Call Call Return return:yes Call Assign Call" + }, + { + "library": "pandas", + "name": "_check_object_for_strings", + "source_code": "def _check_object_for_strings(values: np.ndarray) -> str:\n ndtype = values.dtype.name\n if ndtype == 'object':\n if lib.is_string_array(values, skipna=False):\n ndtype = 'string'\n return ndtype", + "docstring": "Check if we can use string hashtable instead of object hashtable. Parameters ---------- values : ndarray Returns ------- str", + "type": "function", + "file_path": "pandas\\pandas\\core\\algorithms.py", + "ast_data": "FunctionDef name:_check_object_for_strings arg:values arguments arg Assign If Compare If Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "to_dtype_bitcast", + "source_code": "def to_dtype_bitcast(self, x: T, dtype: torch.dtype, src_dtype: torch.dtype) -> T:\n raise NotImplementedError", + "docstring": "Reinterpret cast x to dtype (reinterpreting the bits in memory as another dtype.) src_dtype must be the original type of x.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", + "ast_data": "FunctionDef name:to_dtype_bitcast arg:self arg:x arg:dtype arg:src_dtype arguments arg arg arg arg Raise" + }, + { + "library": "tensorflow", + "name": "most_specific_common_supertype", + "source_code": "def most_specific_common_supertype(self, others: Sequence[trace.TraceType]) -> Optional['Tuple']:\n if not all((isinstance(other, List) for other in others)):\n return None\n supertyped_components_tuple = self.components_tuple.most_specific_common_supertype([other.components_tuple for other in others])\n if supertyped_components_tuple is None:\n return None\n return List(*supertyped_components_tuple.components)", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\default_types.py", + "ast_data": "FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg If Call Call Return return:no Assign Call If Compare Return return:no Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_dtype_config", + "source_code": "def _get_dtype_config(obj: Any) -> DTypeConfig:\n if isinstance(obj, DTypeConfig):\n return obj\n if isinstance(obj, dict):\n return DTypeConfig.from_dict(obj)\n raise ValueError(f\"\"\"Expected a list of DTypeConfigs in backend_pattern_config_dict[\"{DTYPE_CONFIGS_DICT_KEY}\"], got '{type(obj)}'\"\"\")", + "docstring": "Convert the given object into a `` if possible, else throw an exception.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py", + "ast_data": "FunctionDef name:_get_dtype_config arg:obj arguments arg If Call Return return:yes If Call Return return:yes Call Raise Call Call" + }, + { + "library": "tensorflow", + 
"name": "_build_ragged_tensors", + "source_code": "def _build_ragged_tensors(serialized_shape, ragged_values, ragged_row_splits, ragged_inner_splits=None):\n if ragged_inner_splits is not None:\n ragged_values = [ragged_tensor.RaggedTensor.from_row_splits(val, split, validate=False) for val, split in zip(ragged_values, ragged_inner_splits)]\n if serialized_shape.ndims == 0:\n return ragged_values\n else:\n return [ragged_tensor.RaggedTensor.from_row_splits(val, split, validate=False) for val, split in zip(ragged_values, ragged_row_splits)]", + "docstring": "Builds RaggedTensors from the outputs of a parse op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py", + "ast_data": "FunctionDef name:_build_ragged_tensors arg:serialized_shape arg:ragged_values arg:ragged_row_splits arg:ragged_inner_splits arguments arg arg arg arg If Compare Assign Call Call If Compare Return return:yes Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "as_default", + "source_code": "def as_default(self, step=None):\n return _SummaryContextManager(self, step)", + "docstring": "Returns a context manager that enables summary writing. For convenience, if is not None, this function also sets a default value for the parameter used in summary-writing functions elsewhere in the API so that it need not be explicitly passed in every such invocation. The value can be a constant or a variable. Note: when setting in a @tf.function, the step value will be captured at the time the function is traced, so changes to the step outside the function will not be reflected inside the function unless using a step. For example, can be used as: Args: step: An -castable default step value, or . When not , the current step is captured, replaced by a given one, and the original one is restored when the context manager exits. When , the current step is not modified (and not restored when the context manager exits). Returns: The context manager.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:as_default arg:self arg:step arguments arg arg Return return:yes Call" + }, + { + "library": "seaborn", + "name": "axes_ticklabels_overlap", + "source_code": "def axes_ticklabels_overlap(ax):\n return (axis_ticklabels_overlap(ax.get_xticklabels()), axis_ticklabels_overlap(ax.get_yticklabels()))", + "docstring": "Return booleans for whether the x and y ticklabels on an Axes overlap. 
Parameters ---------- ax : matplotlib Axes Returns ------- x_overlap, y_overlap : booleans True when the labels on that axis overlap.", + "type": "function", + "file_path": "seaborn\\seaborn\\utils.py", + "ast_data": "FunctionDef name:axes_ticklabels_overlap arg:ax arguments arg Return return:yes Call Call Call Call" + }, + { + "library": "seaborn", + "name": "has_xy_data", + "source_code": "@property\ndef has_xy_data(self):\n return bool({'x', 'y'} & set(self.variables))", + "docstring": "Return True at least one of x or y is defined.", + "type": "method", + "file_path": "seaborn\\seaborn\\_base.py", + "ast_data": "FunctionDef name:has_xy_data arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "jit_code_filter", + "source_code": "def jit_code_filter(code: CodeType) -> bool:\n if code.co_name != 'forward' and (not code.co_filename or code.co_filename[0] == '<'):\n return False\n filename = Path(code.co_filename).resolve()\n return not any((_startswith(filename, lib_path) for lib_path in LIB_PATHS))", + "docstring": "Codefilter for Torchscript to trace forward calls. The custom CodeFilter is required while scripting a FX Traced forward calls. FX Traced forward calls have start with '<' which is used to exclude tracing of stdlib and site-packages in the default code filter. Since we need all forward calls to be traced, this custom code filter checks for code.co_name to be 'forward' and enables tracing for all such calls. The code filter is similar to default code filter for monkeytype and excludes tracing of stdlib and site-packages.", + "type": "function", + "file_path": "pytorch\\torch\\jit\\_monkeytype_config.py", + "ast_data": "FunctionDef name:jit_code_filter arg:code arguments arg If BoolOp Compare BoolOp Compare Return return:yes Assign Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_elementwise_where", + "source_code": "def _elementwise_where(condition, x, y):\n condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor)\n x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor)\n y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor)\n if not (condition_is_ragged or x_is_ragged or y_is_ragged):\n return array_ops.where(condition, x, y)\n elif condition_is_ragged and x_is_ragged and y_is_ragged:\n return ragged_functional_ops.map_flat_values(array_ops.where, condition, x, y)\n elif not condition_is_ragged:\n condition.shape.assert_has_rank(1)\n x_and_y = ragged_concat_ops.concat([x, y], axis=0)\n x_nrows = _nrows(x, out_type=x_and_y.row_splits.dtype)\n y_nrows = _nrows(y, out_type=x_and_y.row_splits.dtype)\n indices = array_ops.where(condition, math_ops.range(x_nrows), x_nrows + math_ops.range(y_nrows))\n return ragged_gather_ops.gather(x_and_y, indices)\n else:\n raise ValueError('Input shapes do not match.')", + "docstring": "Ragged version of tf.where(condition, x, y).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_where_op.py", + "ast_data": "FunctionDef name:_elementwise_where arg:condition arg:x arg:y arguments arg arg arg Assign Call Assign Call Assign Call If BoolOp Return return:yes Call If BoolOp Return return:yes Call If Call Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes Call Raise Call" + }, + { + "library": "django", + "name": "kml", + "source_code": "@property\ndef kml(self):\n return capi.to_kml(self.ptr, None)", + "docstring": "Return the KML representation of the Geometry.", + "type": "method", + 
"file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:kml arg:self arguments arg Return return:yes Call" + }, + { + "library": "scrapy", + "name": "file_path", + "source_code": "@abstractmethod\ndef file_path(self, request: Request, response: Response | None=None, info: SpiderInfo | None=None, *, item: Any=None) -> str:\n raise NotImplementedError", + "docstring": "Returns the path where downloaded media should be stored", + "type": "method", + "file_path": "scrapy\\scrapy\\pipelines\\media.py", + "ast_data": "FunctionDef name:file_path arg:self arg:request arg:response arg:info arguments arg arg arg arg arg Raise" + }, + { + "library": "pytorch", + "name": "set_grad_enabled", + "source_code": "class set_grad_enabled(_DecoratorContextManager):\n\n def __init__(self, mode: bool) -> None:\n self.prev = torch.is_grad_enabled()\n self.mode = mode\n torch._C._set_grad_enabled(mode)\n\n def __call__(self, orig_func: F) -> F:\n torch._C._set_grad_enabled(self.prev)\n return super().__call__(orig_func)\n\n def __enter__(self) -> None:\n torch._C._set_grad_enabled(self.mode)\n\n def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:\n torch._C._set_grad_enabled(self.prev)\n\n def clone(self) -> 'set_grad_enabled':\n return self.__class__(self.mode)", + "docstring": "Context-manager that sets gradient calculation on or off. `modelocally-disable-grad-docforward-mode AD `. Example:: >>> # xdoctest: +SKIP >>> x = torch.tensor([1.], requires_grad=True) >>> is_train = False >>> with torch.set_grad_enabled(is_train): ... y = x * 2 >>> y.requires_grad False >>> _ = torch.set_grad_enabled(True) >>> y = x * 2 >>> y.requires_grad True >>> _ = torch.set_grad_enabled(False) >>> y = x * 2 >>> y.requires_grad False", + "type": "class", + "file_path": "pytorch\\torch\\autograd\\grad_mode.py", + "ast_data": "ClassDef name:set_grad_enabled FunctionDef name:__init__ arg:self arg:mode arguments arg arg Assign Call Assign Call FunctionDef name:__call__ arg:self arg:orig_func arguments arg arg Call Return return:yes Call Call FunctionDef name:__enter__ arg:self arguments arg Call FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg Call FunctionDef name:clone arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "DisallowedModelAdminLookup", + "source_code": "class DisallowedModelAdminLookup(SuspiciousOperation):\n pass", + "docstring": "Invalid filter was passed to admin view via URL querystring", + "type": "class", + "file_path": "django\\django\\contrib\\admin\\exceptions.py", + "ast_data": "ClassDef name:DisallowedModelAdminLookup" + }, + { + "library": "pytorch", + "name": "ConvAddReLU2d", + "source_code": "class ConvAddReLU2d(_FusedModule):\n\n def __init__(self, conv, add, relu):\n super().__init__(conv)\n self.add = add\n self.relu = relu\n\n def forward(self, x1, x2):\n return self.relu(self.add(self[0](x1), x2))", + "docstring": "This is a sequential container which calls the Conv2d, add, Relu. 
During quantization this will be replaced with the corresponding fused module.", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py", + "ast_data": "ClassDef name:ConvAddReLU2d FunctionDef name:__init__ arg:self arg:conv arg:add arg:relu arguments arg arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:x1 arg:x2 arguments arg arg arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "_constrain_range_for_size", + "source_code": "def _constrain_range_for_size(a: SymInt, min: Optional[int]=None, max: Optional[int]=None) -> None:\n if isinstance(a, (SymFloat, SymBool)):\n raise ValueError('Constraining SymFloat/SymBool is nyi')\n assert isinstance(a, SymInt), 'can only constrain range for SymInt'\n assert isinstance(a.node.expr, sympy.Symbol), f'constraining non-Symbols NYI: {a}'\n a.node.shape_env._constrain_range_for_size(a.node.expr, min, max)", + "docstring": "This function is NOT INTENDED to be used by itself.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:_constrain_range_for_size arg:a arg:min arg:max arguments arg arg arg If Call Raise Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "range", + "source_code": "@tf_export('ragged.range')\n@dispatch.add_dispatch_support\ndef range(starts, limits=None, deltas=1, dtype=None, name=None, row_splits_dtype=dtypes.int64):\n row_splits_dtype = dtypes.as_dtype(row_splits_dtype)\n if limits is None:\n starts, limits = (0, starts)\n with ops.name_scope(name, 'RaggedRange', [starts, limits, deltas]) as name:\n starts = ops.convert_to_tensor(starts, dtype=dtype, name='starts')\n limits = ops.convert_to_tensor(limits, dtype=dtype, name='limits')\n deltas = ops.convert_to_tensor(deltas, dtype=dtype, name='deltas')\n if dtype is None:\n starts, limits, deltas = _infer_matching_dtype([starts, limits, deltas], [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])\n result = gen_ragged_math_ops.ragged_range(starts, limits, deltas, Tsplits=row_splits_dtype, name=name)\n return ragged_tensor.RaggedTensor.from_row_splits(result.rt_dense_values, result.rt_nested_splits, validate=False)", + "docstring": "Returns a containing the specified sequences of numbers. Each row of the returned contains a single sequence: If , then will be an empty list. 
Similarly, if startslimitsdeltasTensorlimitsNone0TensorTensor1dtypeRaggedTensorrow_splitstf.int32tf.int64RaggedTensordtyperagged_rank=1`.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py", + "ast_data": "FunctionDef name:range arg:starts arg:limits arg:deltas arg:dtype arg:name arg:row_splits_dtype arguments arg arg arg arg arg arg Assign Call If Compare Assign With Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "__repr__", + "source_code": "def __repr__(self) -> str:\n if self._info_repr():\n buf = StringIO()\n self.info(buf=buf)\n return buf.getvalue()\n repr_params = fmt.get_dataframe_repr_params()\n return self.to_string(**repr_params)", + "docstring": "Return a string representation for a particular DataFrame.", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg If Call Assign Call Call Return return:yes Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "to_graphable", + "source_code": "def to_graphable(stuff):\n flat_args, spec = pytree.tree_flatten(stuff)\n for arg in flat_args:\n if not is_graphable(arg):\n raise RuntimeError(f'Expected all pytree.tree_leaves of (args, kwargs) to be graphable types, but found non-fx-graphable type {type(arg)}. If this type is meant to be constant, mark it as via pytree.register_constant; otherwise, register it as a pytree.')\n return (flat_args, spec)", + "docstring": "Flattens stuff into a flat list of graphable types.", + "type": "function", + "file_path": "pytorch\\torch\\_higher_order_ops\\flat_apply.py", + "ast_data": "FunctionDef name:to_graphable arg:stuff arguments arg Assign Call For If Call Raise Call Call Return return:yes" + }, + { + "library": "django", + "name": "name", + "source_code": "@property\ndef name(self):\n return force_str(capi.get_ds_description(self._ptr))", + "docstring": "Return the name of this raster. 
Corresponds to filename for file-based rasters.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_unpack_tensors", + "source_code": "def _unpack_tensors(reduced, tensor_packer=None):\n if tensor_packer:\n return tensor_packer.unpack(reduced)\n return reduced", + "docstring": "Unpack tensors if they are packed before all-reduce.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py", + "ast_data": "FunctionDef name:_unpack_tensors arg:reduced arg:tensor_packer arguments arg arg If Return return:yes Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "paintEvent", + "source_code": "def paintEvent(self, event):\n self._draw_idle()\n if not hasattr(self, 'renderer'):\n return\n painter = QtGui.QPainter(self)\n try:\n rect = event.rect()\n width = rect.width() * self.device_pixel_ratio\n height = rect.height() * self.device_pixel_ratio\n left, top = self.mouseEventCoords(rect.topLeft())\n bottom = top - height\n right = left + width\n bbox = Bbox([[left, bottom], [right, top]])\n buf = memoryview(self.copy_from_bbox(bbox))\n if QT_API == 'PyQt6':\n from PyQt6 import sip\n ptr = int(sip.voidptr(buf))\n else:\n ptr = buf\n painter.eraseRect(rect)\n qimage = QtGui.QImage(ptr, buf.shape[1], buf.shape[0], QtGui.QImage.Format.Format_RGBA8888)\n qimage.setDevicePixelRatio(self.device_pixel_ratio)\n origin = QtCore.QPoint(rect.left(), rect.top())\n painter.drawImage(origin, qimage)\n if QT_API == 'PySide2' and QtCore.__version_info__ < (5, 12):\n ctypes.c_long.from_address(id(buf)).value = 1\n self._draw_rect_callback(painter)\n finally:\n painter.end()", + "docstring": "Copy the image from the Agg canvas to the qt.drawable. 
In Qt, all drawing should be done inside of here when a widget is shown onscreen.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_qtagg.py", + "ast_data": "FunctionDef name:paintEvent arg:self arg:event arguments arg arg Call If Call Return return:no Assign Call Try Assign Call Assign Call Assign Call Assign Call Call Assign Assign Assign Call Assign Call Call If Compare Assign Call Call Assign Call Assign Call Call Assign Call Call Call Call If BoolOp Compare Compare Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_identity_resolver", + "source_code": "@register_acd_resource_resolver\ndef _identity_resolver(op, resource_reads, resource_writes):\n del op\n\n def update(resource_inputs):\n to_remove = []\n to_add = []\n for resource in resource_inputs:\n if resource.op.type == 'Identity':\n to_remove.append(resource)\n to_add.extend(resource.op.inputs)\n for t in to_remove:\n resource_inputs.discard(t)\n resource_inputs.update(to_add)\n return to_add or to_remove\n return update(resource_reads) or update(resource_writes)", + "docstring": "Replaces Identity output with its input in resource_inputs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\auto_control_deps.py", + "ast_data": "FunctionDef name:_identity_resolver arg:op arg:resource_reads arg:resource_writes arguments arg arg arg FunctionDef name:update arg:resource_inputs arguments arg Assign Assign For If Compare Call Call For Call Call Return return:yes BoolOp Return return:yes BoolOp Call Call" + }, + { + "library": "scipy", + "name": "Trigonometric02", + "source_code": "class Trigonometric02(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-500.0] * self.N, [500.0] * self.N))\n self.custom_bounds = [(0, 2), (0, 2)]\n self.global_optimum = [[0.9 for _ in range(self.N)]]\n self.fglob = 1.0\n\n def fun(self, x, *args):\n self.nfev += 1\n vec = 8 * sin(7 * (x - 0.9) ** 2) ** 2 + 6 * sin(14 * (x - 0.9) ** 2) ** 2 + (x - 0.9) ** 2\n return 1.0 + sum(vec)", + "docstring": "Trigonometric 2 objective function. This class defines the Trigonometric 2 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Trigonometric2}}(x) = 1 + \\sum_{i=1}^{n} 8 \\sin^2 \\left[7(x_i - 0.9)^2 \\right] + 6 \\sin^2 \\left[14(x_i - 0.9)^2 \\right] + (x_i - 0.9)^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_T.py", + "ast_data": "ClassDef name:Trigonometric02 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_connect_nodes", + "source_code": "def _connect_nodes(self, first, second):\n if isinstance(first, Node):\n first.next.add(second)\n second.prev.add(first)\n self.forward_edges.add((first, second))\n else:\n for node in first:\n self._connect_nodes(node, second)", + "docstring": "Connects nodes to signify that control flows from first to second. Args: first: Union[Set[Node, ...], Node] second: Node", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py", + "ast_data": "FunctionDef name:_connect_nodes arg:self arg:first arg:second arguments arg arg arg If Call Call Call Call For Call" + }, + { + "library": "pytorch", + "name": "format_verification_infos", + "source_code": "def format_verification_infos(verification_infos: list[_verification.VerificationInfo]) -> str:\n return '\\n'.join((f'`{info.name}`: `max_abs_diff={info.max_abs_diff:e}`, `max_rel_diff={info.max_rel_diff:e}`, `abs_diff_hist={info.abs_diff_hist}`, `rel_diff_hist={info.rel_diff_hist}`' for info in verification_infos))", + "docstring": "Format the verification result. Args: verification_infos: The verification result. Returns: The formatted verification result.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_reporting.py", + "ast_data": "FunctionDef name:format_verification_infos arg:verification_infos arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "seek", + "source_code": "@deprecation.deprecated_args(None, 'position is deprecated in favor of the offset argument.', 'position')\ndef seek(self, offset=None, whence=0, position=None):\n self._preread_check()\n if offset is None and position is None:\n raise TypeError('seek(): offset argument required')\n if offset is not None and position is not None:\n raise TypeError('seek(): offset and position may not be set simultaneously.')\n if position is not None:\n offset = position\n if whence == 0:\n pass\n elif whence == 1:\n offset += self.tell()\n elif whence == 2:\n offset += self.size()\n else:\n raise errors.InvalidArgumentError(None, None, 'Invalid whence argument: {}. Valid values are 0, 1, or 2.'.format(whence))\n self._read_buf.seek(offset)", + "docstring": "Seeks to the offset in the file. Args: offset: The byte count relative to the whence argument. whence: Valid values for whence are: 0: start of the file (default) 1: relative to the current position of the file 2: relative to the end of file. 
is usually negative.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", + "ast_data": "FunctionDef name:seek arg:self arg:offset arg:whence arg:position arguments arg arg arg arg Call If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call If Compare Assign If Compare If Compare Call If Compare Call Raise Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "experimental_split_to_logical_devices", + "source_code": "def experimental_split_to_logical_devices(self, tensor, partition_dimensions):\n num_logical_devices_per_replica = self.extended._tpu_devices.shape[1]\n num_partition_splits = np.prod(partition_dimensions)\n input_shape = tensor.shape\n tensor_rank = len(input_shape)\n if tensor_rank != len(partition_dimensions):\n raise ValueError('Length of `partition_dimensions` must equal to the rank of `tensor.shape` ({}). Received len(partition_dimensions)={}.'.format(tensor_rank, len(partition_dimensions)))\n for dim_index, dim_size in enumerate(input_shape):\n if dim_size is None:\n continue\n split_size = partition_dimensions[dim_index]\n if dim_size % split_size != 0:\n raise ValueError('Tensor shape at `partition_dimensions[{}]` must be divisible by corresponding value specified by `partition_dimensions` ({}). Received: {}.'.format(dim_index, split_size, dim_size))\n if num_partition_splits != num_logical_devices_per_replica:\n raise ValueError('The product of `partition_dimensions` should be the same as the number of logical devices (={}). Received `partition_dimensions`={},and their product is {}.'.format(num_logical_devices_per_replica, partition_dimensions, num_partition_splits))\n tile_assignment = np.arange(num_partition_splits).reshape(partition_dimensions)\n return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)", + "docstring": "Adds annotation that will be split across logical devices. This adds an annotation to tensor specifying that operations on will be split among multiple logical devices. Tensor will be split across dimensions specified by . The dimensions of must be divisible by corresponding value in . For example, for system with 8 logical devices, if is an image tensor with shape (batch_size, width, height, channel) and is [1, 2, 4, 1], then will be split 2 in width dimension and 4 way in height dimension and the split tensor values will be fed into 8 logical devices. Args: tensor: Input tensor to annotate. partition_dimensions: An unnested list of integers with the size equal to rank of specifying how will be partitioned. The product of all elements in must be equal to the total number of logical devices per replica. Raises: ValueError: 1) If the size of partition_dimensions does not equal to rank of or 2) if product of elements of does not match the number of logical devices per replica defined by the implementing DistributionStrategy's device specification or 3) if a known size of is not divisible by corresponding value in . 
Returns: Annotated tensor with identical value as .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", + "ast_data": "FunctionDef name:experimental_split_to_logical_devices arg:self arg:tensor arg:partition_dimensions arguments arg arg arg Assign Assign Call Assign Assign Call If Compare Call Raise Call Call Call For Call If Compare Assign If Compare Raise Call Call If Compare Raise Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "StrCategoryLocator", + "source_code": "class StrCategoryLocator(ticker.Locator):\n\n def __init__(self, units_mapping):\n self._units = units_mapping\n\n def __call__(self):\n return list(self._units.values())\n\n def tick_values(self, vmin, vmax):\n return self()", + "docstring": "Tick at every integer mapping of the string data.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\category.py", + "ast_data": "ClassDef name:StrCategoryLocator FunctionDef name:__init__ arg:self arg:units_mapping arguments arg arg Assign FunctionDef name:__call__ arg:self arguments arg Return return:yes Call Call FunctionDef name:tick_values arg:self arg:vmin arg:vmax arguments arg arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_fill_non_empty_info", + "source_code": "def _fill_non_empty_info(self) -> None:\n self.add_object_type_line()\n self.add_index_range_line()\n self.add_series_name_line()\n self.add_header_line()\n self.add_separator_line()\n self.add_body_lines()\n self.add_dtypes_line()\n if self.display_memory_usage:\n self.add_memory_usage_line()", + "docstring": "Add lines to the info table, pertaining to non-empty series.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:_fill_non_empty_info arg:self arguments arg Call Call Call Call Call Call Call If Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.interpd\ndef __init__(self, xy, width, height, boxstyle='round', *, mutation_scale=1, mutation_aspect=1, **kwargs):\n super().__init__(**kwargs)\n self._x, self._y = xy\n self._width = width\n self._height = height\n self.set_boxstyle(boxstyle)\n self._mutation_scale = mutation_scale\n self._mutation_aspect = mutation_aspect\n self.stale = True", + "docstring": "Parameters ---------- xy : (float, float) The lower left corner of the box. width : float The width of the box. height : float The height of the box. boxstyle : str or The style of the fancy box. This can either be a instance or a string of the style name and optionally comma separated attributes (e.g. \"Round, pad=0.2\"). This string is passed to to construct a object. See there for a full documentation. The following box styles are available: %(BoxStyle:table)s mutation_scale : float, default: 1 Scaling factor applied to the attributes of the box style (e.g. pad or rounding_size). mutation_aspect : float, default: 1 The height of the rectangle will be squeezed by this value before the mutation and the mutated box will be stretched by the inverse of it. For example, this allows different horizontal and vertical padding. 
Other Parameters ---------------- **kwargs : properties %(Patch:kwdoc)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:xy arg:width arg:height arg:boxstyle arguments arg arg arg arg arg arg arg arg Call Call Assign Assign Assign Call Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "_coalesce_timeline", + "source_code": "def _coalesce_timeline(self, device_str):\n device = torch.device(device_str)\n times: list[int] = []\n sizes: list[list[int]] = []\n\n def update(key, version, delta):\n category = self.categories.get(key, version) if isinstance(key, TensorKey) else None\n index = _CATEGORY_TO_INDEX[category] + 1\n sizes[-1][index] += int(delta)\n t_min = -1\n for t, action, (key, version), numbytes in self.timeline:\n if key.device != device:\n continue\n if t != -1:\n t = int(t / 1000)\n if t_min == -1 or (t < t_min and t > 0):\n t_min = t\n if len(times) == 0:\n times.append(t)\n sizes.append([0] + [0 for _ in _CATEGORY_TO_INDEX])\n elif t != times[-1]:\n times.append(t)\n sizes.append(sizes[-1].copy())\n if action in (Action.PREEXISTING, Action.CREATE):\n update(key, version, numbytes)\n elif action == Action.INCREMENT_VERSION:\n update(key, version, -numbytes)\n update(key, version + 1, numbytes)\n elif action == Action.DESTROY:\n update(key, version, -numbytes)\n else:\n raise ValueError(f'Unknown action: {action}')\n times = [t_min if t < 0 else t for t in times]\n return (times, sizes)", + "docstring": "Convert the memory timeline and categories into a memory plot consisting of timestamps and their respective sizes by category for a given device. Input: device Output: [timestamps, sizes by category]", + "type": "method", + "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py", + "ast_data": "FunctionDef name:_coalesce_timeline arg:self arg:device_str arguments arg arg Assign Call FunctionDef name:update arg:key arg:version arg:delta arguments arg arg arg Assign Call Call Assign Call Assign For If Compare If Compare Assign Call If BoolOp Compare BoolOp Compare Compare Assign If Compare Call Call Call If Compare Call Call Call If Compare Call If Compare Call Call If Compare Call Raise Call Assign Compare Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.interpd\ndef __init__(self, xy, radius=5, *, resolution=20, **kwargs):\n super().__init__(xy, resolution, radius=radius, orientation=0, **kwargs)", + "docstring": "Create a circle at *xy* = (*x*, *y*) with given *radius*. This circle is approximated by a regular polygon with *resolution* sides. For a smoother circle drawn with splines, see . 
Valid keyword arguments are: %(Patch:kwdoc)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:xy arg:radius arguments arg arg arg arg arg Call Call" + }, + { + "library": "scrapy", + "name": "re_rsearch", + "source_code": "def re_rsearch(pattern: str | Pattern[str], text: str, chunk_size: int=1024) -> tuple[int, int] | None:\n\n def _chunk_iter() -> Iterable[tuple[str, int]]:\n offset = len(text)\n while True:\n offset -= chunk_size * 1024\n if offset <= 0:\n break\n yield (text[offset:], offset)\n yield (text, 0)\n if isinstance(pattern, str):\n pattern = re.compile(pattern)\n for chunk, offset in _chunk_iter():\n matches = list(pattern.finditer(chunk))\n if matches:\n start, end = matches[-1].span()\n return (offset + start, offset + end)\n return None", + "docstring": "This function does a reverse search in a text using a regular expression given in the attribute 'pattern'. Since the re module does not provide this functionality, we have to find for the expression into chunks of text extracted from the end (for the sake of efficiency). At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for the pattern. If the pattern is not found, another chunk is extracted, and another search is performed. This process continues until a match is found, or until the whole file is read. In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing the start position of the match, and the ending (regarding the entire text).", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\python.py", + "ast_data": "FunctionDef name:re_rsearch arg:pattern arg:text arg:chunk_size arguments arg arg arg FunctionDef name:_chunk_iter arguments Assign Call While If Compare If Call Assign Call For Call Assign Call Call If Assign Call Return return:yes Return return:no" + }, + { + "library": "pytorch", + "name": "freeze", + "source_code": "def freeze(mod, preserved_attrs: Optional[list[str]]=None, optimize_numerics: bool=True):\n if not isinstance(mod, ScriptModule):\n raise RuntimeError(\"Freezing expects a ScriptModule as input. Please use torch.jit.script or torch.jit.trace to script your 'nn.Module'.\")\n if mod.training:\n raise RuntimeError('Freezing is currently only implemented for modules in eval mode. Please call .eval() on your module before freezing.')\n preserved_attrs = preserved_attrs if preserved_attrs is not None else []\n out = RecursiveScriptModule(torch._C._freeze_module(mod._c, preserved_attrs))\n RecursiveScriptModule._finalize_scriptmodule(out)\n preserved_methods = [x for x in preserved_attrs if mod._c._has_method(x)]\n run_frozen_optimizations(out, optimize_numerics, preserved_methods)\n return out", + "docstring": "Freeze ScriptModule, inline submodules, and attributes as constants. Freezing a :class: will clone it and attempt to inline the cloned module's submodules, parameters, and attributes as constants in the TorchScript IR Graph. By default, will be preserved, as well as attributes & methods specified in . Additionally, any attribute that is modified within a preserved method will be preserved. Freezing currently only accepts ScriptModules that are in eval mode. Freezing applies generic optimization that will speed up your model regardless of machine. To further optimize using server-specific settings, run after freezing. 
Args: mod (:class:): a module to be frozen preserved_attrs (Optional[List[str]]): a list of attributes to preserve in addition to the forward method. Attributes modified in preserved methods will also be preserved. optimize_numerics (bool): If `torch.jit.run_frozen_optimizationsScriptModuleversionmodified_tensordump_alias_dbtomap_locationtorch.jit.load`, however device-specific logic may have been baked into the model.", + "type": "function", + "file_path": "pytorch\\torch\\jit\\_freeze.py", + "ast_data": "FunctionDef name:freeze arg:mod arg:preserved_attrs arg:optimize_numerics arguments arg arg arg If Call Raise Call If Raise Call Assign Compare Assign Call Call Call Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "skew", + "source_code": "def skew(self, xShear, yShear):\n rx = math.tan(xShear)\n ry = math.tan(yShear)\n mtx = self._mtx\n (xx, xy, x0), (yx, yy, y0), _ = mtx.tolist()\n mtx[0, 0] += rx * yx\n mtx[0, 1] += rx * yy\n mtx[0, 2] += rx * y0\n mtx[1, 0] += ry * xx\n mtx[1, 1] += ry * xy\n mtx[1, 2] += ry * x0\n self.invalidate()\n return self", + "docstring": "Add a skew in place. *xShear* and *yShear* are the shear angles along the *x*- and *y*-axes, respectively, in radians. Returns *self*, so this method can easily be chained with more calls to :meth:, :meth:, :meth: and :meth:.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:skew arg:self arg:xShear arg:yShear arguments arg arg arg Assign Call Assign Call Assign Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "check_for_old_version", + "source_code": "def check_for_old_version(old_version, new_version):\n for old_ver in [old_version.string, old_version.pep_440_str]:\n check_for_lingering_string(old_ver)\n if major_minor_change(old_version, new_version):\n old_r_major_minor = 'r%s.%s' % (old_version.major, old_version.minor)\n check_for_lingering_string(old_r_major_minor)", + "docstring": "Check for old version references.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\update_version.py", + "ast_data": "FunctionDef name:check_for_old_version arg:old_version arg:new_version arguments arg arg For Call If Call Assign Call" + }, + { + "library": "scipy", + "name": "_display_iter", + "source_code": "def _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj, header=False):\n if header:\n print('Primal Feasibility ', 'Dual Feasibility ', 'Duality Gap ', 'Step ', 'Path Parameter ', 'Objective ')\n fmt = '{0:<20.13}{1:<20.13}{2:<20.13}{3:<17.13}{4:<20.13}{5:<20.13}'\n print(fmt.format(float(rho_p), float(rho_d), float(rho_g), alpha if isinstance(alpha, str) else float(alpha), float(rho_mu), float(obj)))", + "docstring": "Print indicators of optimization status to the console. Parameters ---------- rho_p : float The (normalized) primal feasibility, see [4] 4.5 rho_d : float The (normalized) dual feasibility, see [4] 4.5 rho_g : float The (normalized) duality gap, see [4] 4.5 alpha : float The step size, see [4] 4.3 rho_mu : float The (normalized) path parameter, see [4] 4.5 obj : float The objective function value of the current iterate header : bool True if a header is to be printed References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. \"The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm.\" High performance optimization. Springer US, 2000. 
197-232.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_linprog_ip.py", + "ast_data": "FunctionDef name:_display_iter arg:rho_p arg:rho_d arg:rho_g arg:alpha arg:rho_mu arg:obj arg:header arguments arg arg arg arg arg arg arg If Call Assign Call Call Call Call Call Call Call Call Call" + }, + { + "library": "numpy", + "name": "mask_or", + "source_code": "def mask_or(m1, m2, copy=False, shrink=True):\n if m1 is nomask or m1 is False:\n dtype = getattr(m2, 'dtype', MaskType)\n return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)\n if m2 is nomask or m2 is False:\n dtype = getattr(m1, 'dtype', MaskType)\n return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)\n if m1 is m2 and is_mask(m1):\n return _shrink_mask(m1) if shrink else m1\n dtype1, dtype2 = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))\n if dtype1 != dtype2:\n raise ValueError(f\"Incompatible dtypes '{dtype1}'<>'{dtype2}'\")\n if dtype1.names is not None:\n newmask = np.empty(np.broadcast(m1, m2).shape, dtype1)\n _recursive_mask_or(m1, m2, newmask)\n return newmask\n return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)", + "docstring": "Combine two masks with the `m1m2nomasknomasknomaskm1m2m1m2` have different flexible dtypes. Examples -------- >>> import numpy as np >>> m1 = np.ma.make_mask([0, 1, 1, 0]) >>> m2 = np.ma.make_mask([1, 0, 0, 0]) >>> np.ma.mask_or(m1, m2) array([ True, True, True, False])", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:mask_or arg:m1 arg:m2 arg:copy arg:shrink arguments arg arg arg arg If BoolOp Compare Compare Assign Call Return return:yes Call If BoolOp Compare Compare Assign Call Return return:yes Call If BoolOp Compare Call Return return:yes Call Assign Call Call If Compare Raise Call If Compare Assign Call Call Call Return return:yes Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_collect_nodes", + "source_code": "def _collect_nodes(self, start: Optional[str], end: Optional[str]) -> NodeList:\n nodes: NodeList = []\n add_node = start is None\n for node in self.module.graph.nodes:\n if node.op not in CALLABLE_NODE_OPS:\n continue\n if node.name == start:\n add_node = True\n if add_node:\n nodes.append(node)\n if node.name == end:\n break\n return nodes", + "docstring": "Collect nodes in the model that between nodes with name of and . These two nodes are also included.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py", + "ast_data": "FunctionDef name:_collect_nodes arg:self arg:start arg:end arguments arg arg arg Assign Compare For If Compare If Compare Assign If Call If Compare Return return:yes" + }, + { + "library": "django", + "name": "_get_all_permissions", + "source_code": "def _get_all_permissions(opts):\n return [*_get_builtin_permissions(opts), *opts.permissions]", + "docstring": "Return (codename, name) for all permissions in the given opts.", + "type": "function", + "file_path": "django\\django\\contrib\\auth\\management\\__init__.py", + "ast_data": "FunctionDef name:_get_all_permissions arg:opts arguments arg Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "clear", + "source_code": "def clear(self):\n if not self.loaded:\n self.load()\n self._data.clear()", + "docstring": "Clean up the session-stored data. D.clear() -> None. 
Remove all items from D.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:clear arg:self arguments arg If Call Call" + }, + { + "library": "scrapy", + "name": "parse_nodes", + "source_code": "def parse_nodes(self, response: Response, nodes: Iterable[Selector]) -> Any:\n for selector in nodes:\n ret = iterate_spider_output(self.parse_node(response, selector))\n yield from self.process_results(response, ret)", + "docstring": "This method is called for the nodes matching the provided tag name (itertag). Receives the response and an Selector for each node. Overriding this method is mandatory. Otherwise, you spider won't work. This method must return either an item, a request, or a list containing any of them.", + "type": "method", + "file_path": "scrapy\\scrapy\\spiders\\feed.py", + "ast_data": "FunctionDef name:parse_nodes arg:self arg:response arg:nodes arguments arg arg arg For Assign Call Call Call" + }, + { + "library": "matplotlib", + "name": "_get_scalar_alpha", + "source_code": "def _get_scalar_alpha(self):\n return 1.0 if self._alpha is None or np.ndim(self._alpha) > 0 else self._alpha", + "docstring": "Get a scalar alpha value to be applied to the artist as a whole. If the alpha value is a matrix, the method returns 1.0 because pixels have individual alpha values (see for details). If the alpha value is a scalar, the method returns said value to be applied to the artist as a whole because pixels do not have individual alpha values.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:_get_scalar_alpha arg:self arguments arg Return return:yes BoolOp Compare Compare Call" + }, + { + "library": "matplotlib", + "name": "grab_frame", + "source_code": "@abc.abstractmethod\ndef grab_frame(self, **savefig_kwargs):\n pass", + "docstring": "Grab the image information from the figure and save as a movie frame. All keyword arguments in *savefig_kwargs* are passed on to the call that saves the figure. However, several keyword arguments that are supported by may not be passed as they are controlled by the MovieWriter: - *dpi*, *bbox_inches*: These may not be passed because each frame of the animation much be exactly the same size in pixels. - *format*: This is controlled by the MovieWriter.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\animation.py", + "ast_data": "FunctionDef name:grab_frame arg:self arguments arg arg" + }, + { + "library": "scikit-learn", + "name": "_initialize", + "source_code": "def _initialize(self, X, resp):\n nk, xk, sk = _estimate_gaussian_parameters(X, resp, self.reg_covar, self.covariance_type)\n self._estimate_weights(nk)\n self._estimate_means(nk, xk)\n self._estimate_precisions(nk, xk, sk)", + "docstring": "Initialization of the mixture parameters. 
Parameters ---------- X : array-like of shape (n_samples, n_features) resp : array-like of shape (n_samples, n_components)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py", + "ast_data": "FunctionDef name:_initialize arg:self arg:X arg:resp arguments arg arg arg Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_Flatten", + "source_code": "def _Flatten(l):\n l_of_l = [x if _IsListValue(x) else [x] for x in l]\n return [item for sublist in l_of_l for item in sublist]", + "docstring": "Converts [1, 2, [3, 4], [5]] to [1, 2, 3, 4, 5].", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_library.py", + "ast_data": "FunctionDef name:_Flatten arg:l arguments arg Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='kl_divergence'):\n super().__init__(kl_divergence, name=name, reduction=reduction)", + "docstring": "Initializes instance. Args: reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. Defaults to 'kl_divergence'.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:reduction arg:name arguments arg arg arg Call Call" + }, + { + "library": "django", + "name": "default_units", + "source_code": "def default_units(self, kwargs):\n val = 0.0\n default_unit = self.STANDARD_UNIT\n for unit, value in kwargs.items():\n if not isinstance(value, float):\n value = float(value)\n if unit in self.UNITS:\n val += self.UNITS[unit] * value\n default_unit = unit\n elif unit in self.ALIAS:\n u = self.ALIAS[unit]\n val += self.UNITS[u] * value\n default_unit = u\n else:\n lower = unit.lower()\n if lower in self.UNITS:\n val += self.UNITS[lower] * value\n default_unit = lower\n elif lower in self.LALIAS:\n u = self.LALIAS[lower]\n val += self.UNITS[u] * value\n default_unit = u\n else:\n raise AttributeError('Unknown unit type: %s' % unit)\n return (val, default_unit)", + "docstring": "Return the unit value and the default units specified from the given keyword arguments dictionary.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\measure.py", + "ast_data": "FunctionDef name:default_units arg:self arg:kwargs arguments arg arg Assign Assign For Call If Call Assign Call If Compare Assign If Compare Assign Assign Assign Call If Compare Assign If Compare Assign Assign Raise Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "NoNorm", + "source_code": "class NoNorm(Normalize):\n\n def __call__(self, value, clip=None):\n if np.iterable(value):\n return np.ma.array(value)\n return value\n\n def inverse(self, value):\n if np.iterable(value):\n return np.ma.array(value)\n return value", + "docstring": "Dummy replacement for , for the case where we want to use indices directly in a .", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "ClassDef name:NoNorm FunctionDef name:__call__ arg:self arg:value arg:clip arguments arg arg arg If Call Return return:yes Call Return return:yes FunctionDef name:inverse arg:self arg:value 
arguments arg arg If Call Return return:yes Call Return return:yes" + }, + { + "library": "django", + "name": "get_response", + "source_code": "def get_response(self, request):\n set_urlconf(settings.ROOT_URLCONF)\n response = self._middleware_chain(request)\n response._resource_closers.append(request.close)\n if response.status_code >= 400:\n log_response('%s: %s', response.reason_phrase, request.path, response=response, request=request)\n return response", + "docstring": "Return an HttpResponse object for the given HttpRequest.", + "type": "method", + "file_path": "django\\django\\core\\handlers\\base.py", + "ast_data": "FunctionDef name:get_response arg:self arg:request arguments arg arg Call Assign Call Call If Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_check_rnn_cell_input_dtypes", + "source_code": "def _check_rnn_cell_input_dtypes(inputs):\n for t in nest.flatten(inputs):\n _check_supported_dtypes(t.dtype)", + "docstring": "Check whether the input tensors are with supported dtypes. Default RNN cells only support floats and complex as its dtypes since the activation function (tanh and sigmoid) only allow those types. This function will throw a proper error message if the inputs is not in a supported type. Args: inputs: tensor or nested structure of tensors that are feed to RNN cell as input or state. Raises: ValueError: if any of the input tensor are not having dtypes of float or complex.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py", + "ast_data": "FunctionDef name:_check_rnn_cell_input_dtypes arg:inputs arguments arg For Call Call" + }, + { + "library": "pytorch", + "name": "bool", + "source_code": "def bool(self):\n _warn_typed_storage_removal()\n return self._to(torch.bool)", + "docstring": "Casts this storage to bool type.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:bool arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_find_extraneous_saver_nodes", + "source_code": "def _find_extraneous_saver_nodes(graph_def, saver_def):\n nodes = {node_def.name: (set((tensor.get_op_name(x) for x in node_def.input)), node_def.op) for node_def in graph_def.node}\n retain_scope_save = None\n retain_scope_restore = None\n if saver_def is not None:\n save_op_name = tensor.get_op_name(saver_def.save_tensor_name)\n restore_op_name = tensor.get_op_name(saver_def.restore_op_name)\n retain_scope_restore = _get_scope(restore_op_name) + '/'\n retain_scope_save = _get_scope(save_op_name) + '/'\n all_saver_node_names = set((name for name, (_, op) in nodes.items() if op in SAVE_AND_RESTORE_OPS))\n all_saver_scopes = set((_get_scope(x) for x in all_saver_node_names)) - all_saver_node_names\n all_saver_scopes = set((x + '/' for x in all_saver_scopes))\n extraneous_scopes = all_saver_scopes - set([retain_scope_save, retain_scope_restore])\n extraneous_node_names = set()\n for name, _ in nodes.items():\n for extraneous_scope in extraneous_scopes:\n if name.startswith(extraneous_scope):\n extraneous_node_names.add(name)\n break\n return extraneous_node_names", + "docstring": "Identifies any nodes in the graph_def related to unused Savers. This approach assumes that each Saver is cleanly isolated in its own name scope, so we need only identify the scopes associated with extraneous Savers and return all the nodes in those scopes. Args: graph_def: a GraphDef proto to evaluate. 
saver_def: a SaverDef proto referencing Save/Restore ops to be retained. Returns: An iterable of node names that may be safely omitted.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\meta_graph.py", + "ast_data": "FunctionDef name:_find_extraneous_saver_nodes arg:graph_def arg:saver_def arguments arg arg Assign Call Call Assign Assign If Compare Assign Call Assign Call Assign Call Assign Call Assign Call Call Compare Assign Call Call Assign Call Assign Call Assign Call For Call For If Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "dtype", + "source_code": "@property\ndef dtype(self):\n return self._inner_shape.dtype", + "docstring": "The dtype of the shape -- one of tf.int32 or tf.int64.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "MaybeCreateControlFlowState", + "source_code": "def MaybeCreateControlFlowState(between_op_list, between_ops, colocate_gradients_with_ops):\n loop_state = None\n for op in between_op_list:\n if util.IsLoopExit(op):\n if loop_state is None:\n loop_state = _ControlFlowState()\n if colocate_gradients_with_ops:\n with ops.colocate_with(op):\n loop_state.AddWhileContext(op, between_op_list, between_ops)\n else:\n loop_state.AddWhileContext(op, between_op_list, between_ops)\n return loop_state", + "docstring": "Create the state for all the while loops involved in one gradients(). We create a _ControlFlowState when there are while loops involved in gradients(). In gradients(), control flow logic is only invoked when the _ControlFlowState is not None. Note that this method modifies and .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py", + "ast_data": "FunctionDef name:MaybeCreateControlFlowState arg:between_op_list arg:between_ops arg:colocate_gradients_with_ops arguments arg arg arg Assign For If Call If Compare Assign Call If With Call Call Call Return return:yes" + }, + { + "library": "scrapy", + "name": "maybeDeferred_coro", + "source_code": "def maybeDeferred_coro(f: Callable[_P, Any], *args: _P.args, **kw: _P.kwargs) -> Deferred[Any]:\n try:\n result = f(*args, **kw)\n except:\n return fail(failure.Failure(captureVars=Deferred.debug))\n if isinstance(result, Deferred):\n return result\n if asyncio.isfuture(result) or inspect.isawaitable(result):\n return deferred_from_coro(result)\n if isinstance(result, failure.Failure):\n return fail(result)\n return succeed(result)", + "docstring": "Copy of defer.maybeDeferred that also converts coroutines to Deferreds.", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\defer.py", + "ast_data": "FunctionDef name:maybeDeferred_coro arg:f arguments arg arg arg Try Assign Call ExceptHandler Return return:yes Call Call If Call Return return:yes If BoolOp Call Call Return return:yes Call If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "experimental_type_proto", + "source_code": "@classmethod\ndef experimental_type_proto(cls) -> Type[struct_pb2.BoundedTensorSpecProto]:\n return struct_pb2.BoundedTensorSpecProto", + "docstring": "Returns the type of proto associated with BoundedTensorSpec serialization.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", + "ast_data": "FunctionDef name:experimental_type_proto arg:cls arguments 
arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_version", + "source_code": "@staticmethod\ndef _get_version(cmd: str | None) -> Any:\n if cmd is None:\n return None\n for line in check_output([cmd, '--version']).decode('utf-8').split('\\n'):\n if 'version' in line:\n return LooseVersion(line.strip().split(' ')[2])\n raise RuntimeError('no version found')", + "docstring": "Returns cmake version.", + "type": "method", + "file_path": "pytorch\\tools\\setup_helpers\\cmake.py", + "ast_data": "FunctionDef name:_get_version arg:cmd arguments arg If Compare Return return:no For Call Call Call If Compare Return return:yes Call Call Call Raise Call" + }, + { + "library": "tensorflow", + "name": "enter_section", + "source_code": "def enter_section(self, section_id):\n assert section_id not in self.exits\n self.exits[section_id] = set()", + "docstring": "Enters a regular section. Regular sections admit exit jumps, which end the section. Args: section_id: Hashable, the same node that will be used in calls to the ast_node arg passed to add_exit_node", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py", + "ast_data": "FunctionDef name:enter_section arg:self arg:section_id arguments arg arg Compare Assign Call" + }, + { + "library": "authlib", + "name": "create_endpoint_response", + "source_code": "def create_endpoint_response(self, request):\n client = self.authenticate_endpoint_client(request)\n token = self.authenticate_token(request, client)\n body = self.create_introspection_payload(token)\n return (200, body, default_json_headers)", + "docstring": "Validate introspection request and create the response. :returns: (status_code, body, headers)", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7662\\introspection.py", + "ast_data": "FunctionDef name:create_endpoint_response arg:self arg:request arguments arg arg Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scrapy", + "name": "maxpriority", + "source_code": "def maxpriority(self) -> int:\n if len(self) > 0:\n return max((cast(int, self.getpriority(name)) for name in self))\n return get_settings_priority('default')", + "docstring": "Return the numerical value of the highest priority present throughout all settings, or the numerical value for `~scrapy.settings.SETTINGS_PRIORITIES` if there are no settings stored.", + "type": "method", + "file_path": "scrapy\\scrapy\\settings\\__init__.py", + "ast_data": "FunctionDef name:maxpriority arg:self arguments arg If Compare Call Return return:yes Call Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "normalize_kernel2d", + "source_code": "def normalize_kernel2d(input: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(input, ['*', 'H', 'W'])\n norm = input.abs().sum(dim=-1).sum(dim=-1)\n return input / norm[..., None, None]", + "docstring": "Normalize both derivative and smoothing kernel.", + "type": "function", + "file_path": "kornia\\kornia\\filters\\kernels.py", + "ast_data": "FunctionDef name:normalize_kernel2d arg:input arguments arg Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "CleanCopier", + "source_code": "class CleanCopier(object):\n\n def __init__(self, preserve_annos):\n super(CleanCopier, self).__init__()\n self.preserve_annos = preserve_annos\n\n def copy(self, node):\n if isinstance(node, list):\n return [self.copy(n) for n in node]\n elif isinstance(node, tuple):\n return tuple((self.copy(n) for n in node))\n elif not 
isinstance(node, (gast.AST, ast.AST)):\n return node\n assert isinstance(node, (gast.AST, ast.AST))\n new_fields = {}\n for f in node._fields:\n if not f.startswith('__') and hasattr(node, f):\n new_fields[f] = self.copy(getattr(node, f))\n new_node = type(node)(**new_fields)\n if self.preserve_annos:\n for k in self.preserve_annos:\n anno.copyanno(node, new_node, k)\n return new_node", + "docstring": "NodeTransformer-like visitor that copies an AST.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\ast_util.py", + "ast_data": "ClassDef name:CleanCopier FunctionDef name:__init__ arg:self arg:preserve_annos arguments arg arg Call Call Assign FunctionDef name:copy arg:self arg:node arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes Call Assign For If BoolOp Call Call Assign Call Call Assign Call Call If For Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "classproperty", + "source_code": "def classproperty(func):\n if not isinstance(func, (classmethod, staticmethod)):\n func = classmethod(func)\n return _ClassPropertyDescriptor(func)", + "docstring": "Decorator like classmethod to implement a static class property.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\_helper.py", + "ast_data": "FunctionDef name:classproperty arg:func arguments arg If Call Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "Scaled", + "source_code": "class Scaled(_Base):\n\n def __init__(self, scalable_size):\n self._scalable_size = scalable_size\n\n def get_size(self, renderer):\n rel_size = self._scalable_size\n abs_size = 0.0\n return (rel_size, abs_size)", + "docstring": "Simple scaled(?) size with absolute part = 0 and relative part = *scalable_size*.", + "type": "class", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py", + "ast_data": "ClassDef name:Scaled FunctionDef name:__init__ arg:self arg:scalable_size arguments arg arg Assign FunctionDef name:get_size arg:self arg:renderer arguments arg arg Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "get_detector_name", + "source_code": "@abstractmethod\ndef get_detector_name(self) -> str:\n pass", + "docstring": "Returns the name of the current detector", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", + "ast_data": "FunctionDef name:get_detector_name arg:self arguments arg" + }, + { + "library": "matplotlib", + "name": "_add_data_doc", + "source_code": "def _add_data_doc(docstring, replace_names):\n if docstring is None or (replace_names is not None and len(replace_names) == 0):\n return docstring\n docstring = inspect.cleandoc(docstring)\n data_doc = ' If given, all parameters also accept a string ``s``, which is\\n interpreted as ``data[s]`` if ``s`` is a key in ``data``.' 
if replace_names is None else f' If given, the following parameters also accept a string ``s``, which is\\n interpreted as ``data[s]`` if ``s`` is a key in ``data``:\\n\\n {', '.join(map('*{}*'.format, replace_names))}'\n if _log.level <= logging.DEBUG:\n if 'data : indexable object, optional' not in docstring:\n _log.debug('data parameter docstring error: no data parameter')\n if 'DATA_PARAMETER_PLACEHOLDER' not in docstring:\n _log.debug('data parameter docstring error: missing placeholder')\n return docstring.replace(' DATA_PARAMETER_PLACEHOLDER', data_doc)", + "docstring": "Add documentation for a *data* field to the given docstring. Parameters ---------- docstring : str The input docstring. replace_names : list of str or None The list of parameter names which arguments should be replaced by `` does not throw an exception). If None, replacement is attempted for all arguments. Returns ------- str The augmented docstring.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\__init__.py", + "ast_data": "FunctionDef name:_add_data_doc arg:docstring arg:replace_names arguments arg arg If BoolOp Compare BoolOp Compare Compare Call Return return:yes Assign Call Assign Compare Call Call If Compare If Compare Call If Compare Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "with_dtype", + "source_code": "def with_dtype(self, dtype):\n dtype = dtypes.as_dtype(dtype)\n if dtype not in (dtypes.int32, dtypes.int64):\n raise ValueError('dtype must be int32 or int64')\n if self.dtype == dtype:\n return self\n return RowPartition(row_splits=_cast_if_not_none(self._row_splits, dtype), row_lengths=_cast_if_not_none(self._row_lengths, dtype), value_rowids=_cast_if_not_none(self._value_rowids, dtype), nrows=_cast_if_not_none(self._nrows, dtype), uniform_row_length=_cast_if_not_none(self._uniform_row_length, dtype), internal=_row_partition_factory_key)", + "docstring": "Returns a copy of this RowPartition with the given encoding dtype. Args: dtype: The dtype for encoding tensors, such as and . One of or . Returns: A copy of this RowPartition, with the encoding tensors cast to the given type.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:with_dtype arg:self arg:dtype arguments arg arg Assign Call If Compare Raise Call If Compare Return return:yes Return return:yes Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_collect_partitioned_variable", + "source_code": "def _collect_partitioned_variable(name, all_vars):\n if name + '/part_0' in all_vars:\n var = []\n i = 0\n while name + '/part_%d' % i in all_vars:\n var.append(all_vars[name + '/part_%d' % i])\n i += 1\n return var\n return None", + "docstring": "Returns list of that comprise the partitioned variable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\checkpoint_utils.py", + "ast_data": "FunctionDef name:_collect_partitioned_variable arg:name arg:all_vars arguments arg arg If Compare Assign Assign While Compare Call Return return:yes Return return:no" + }, + { + "library": "tensorflow", + "name": "broadcast_tensor", + "source_code": "def broadcast_tensor(self, tensor):\n return array_ops.gather(tensor, self.gather_index)", + "docstring": "Broadcast from a dense tensor. It is assumed that the first axis of the dense tensor is indexed by the source shape, and at the end, the first axis of the dense tensor is indexed by the destination shape. 
Args: tensor: a dense tensor. Returns: A dense tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:broadcast_tensor arg:self arg:tensor arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "watershed_ift", + "source_code": "def watershed_ift(input, markers, structure=None, output=None):\n input = np.asarray(input)\n if input.dtype.type not in [np.uint8, np.uint16]:\n raise TypeError('only 8 and 16 unsigned inputs are supported')\n if structure is None:\n structure = _morphology.generate_binary_structure(input.ndim, 1)\n structure = np.asarray(structure, dtype=bool)\n if structure.ndim != input.ndim:\n raise RuntimeError('structure and input must have equal rank')\n for ii in structure.shape:\n if ii != 3:\n raise RuntimeError('structure dimensions must be equal to 3')\n if not structure.flags.contiguous:\n structure = structure.copy()\n markers = np.asarray(markers)\n if input.shape != markers.shape:\n raise RuntimeError('input and markers must have equal shape')\n integral_types = [np.int8, np.int16, np.int32, np.int64, np.intc, np.intp]\n if markers.dtype.type not in integral_types:\n raise RuntimeError('marker should be of integer type')\n if isinstance(output, np.ndarray):\n if output.dtype.type not in integral_types:\n raise RuntimeError('output should be of integer type')\n else:\n output = markers.dtype\n output = _ni_support._get_output(output, input)\n _nd_image.watershed_ift(input, markers, structure, output)\n return output", + "docstring": "Apply watershed from markers using image foresting transform algorithm. Parameters ---------- input : array_like Input. markers : array_like Markers are points within each watershed that form the beginning of the process. Negative markers are considered background markers which are processed after the other markers. structure : structure element, optional A structuring element defining the connectivity of the object can be provided. If None, an element is generated with a squared connectivity equal to one. output : ndarray, optional An output array can optionally be provided. The same shape as input. Returns ------- watershed_ift : ndarray Output. Same shape as . References ---------- .. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, \"The image foresting transform: theory, algorithms, and applications\", Pattern Analysis and Machine Intelligence, vol. 26, pp. 19-29, 2004.", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_measurements.py", + "ast_data": "FunctionDef name:watershed_ift arg:input arg:markers arg:structure arg:output arguments arg arg arg arg Assign Call If Compare Raise Call If Compare Assign Call Assign Call If Compare Raise Call For If Compare Raise Call If Assign Call Assign Call If Compare Raise Call Assign If Compare Raise Call If Call If Compare Raise Call Assign Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "size", + "source_code": "def size(self):\n return stat(self.__name).length", + "docstring": "Returns the size of the file.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", + "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "nvals", + "source_code": "def nvals(self):\n return self._row_splits[-1]", + "docstring": "Returns the number of values partitioned by this . 
If the sequence partitioned by this is a tensor, then is the size of that tensor's outermost dimension -- i.e., . Returns: scalar integer Tensor", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:nvals arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_retrieve_constant", + "source_code": "def _retrieve_constant(spec: 'TreeSpec') -> Any:\n assert _is_constant_holder(spec)\n return tree_unflatten([], spec)", + "docstring": "Given a spec from a pytree registered with register_constant, retrieves the constant", + "type": "function", + "file_path": "pytorch\\torch\\utils\\_pytree.py", + "ast_data": "FunctionDef name:_retrieve_constant arg:spec arguments arg Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "StructuredVoidFormat", + "source_code": "class StructuredVoidFormat:\n\n def __init__(self, format_functions):\n self.format_functions = format_functions\n\n @classmethod\n def from_data(cls, data, **options):\n format_functions = []\n for field_name in data.dtype.names:\n format_function = _get_format_function(data[field_name], **options)\n if data.dtype[field_name].shape != ():\n format_function = SubArrayFormat(format_function, **options)\n format_functions.append(format_function)\n return cls(format_functions)\n\n def __call__(self, x):\n str_fields = [format_function(field) for field, format_function in zip(x, self.format_functions)]\n if len(str_fields) == 1:\n return f'({str_fields[0]},)'\n else:\n return f'({', '.join(str_fields)})'", + "docstring": "Formatter for structured np.void objects. This does not work on structured alias types like np.dtype(('i4', 'i2,i2')), as alias scalars lose their field information, and the implementation relies upon np.void.__getitem__.", + "type": "class", + "file_path": "numpy\\numpy\\_core\\arrayprint.py", + "ast_data": "ClassDef name:StructuredVoidFormat FunctionDef name:__init__ arg:self arg:format_functions arguments arg arg Assign FunctionDef name:from_data arg:cls arg:data arguments arg arg arg Assign For Assign Call If Compare Assign Call Call Return return:yes Call FunctionDef name:__call__ arg:self arg:x arguments arg arg Assign Call Call If Compare Call Return return:yes Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "monochrome", + "source_code": "@property\ndef monochrome(self):\n if not self._isinit:\n self._init()\n return self.N <= 1 or np.all(self._lut[0] == self._lut[1:self.N])", + "docstring": "Return whether all colors in the colormap are identical.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:monochrome arg:self arguments arg If Call Return return:yes BoolOp Compare Call Compare" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n if not is_scalar_nan(self.missing_values):\n ensure_all_finite = True\n else:\n ensure_all_finite = 'allow-nan'\n X = validate_data(self, X, accept_sparse=False, dtype=FLOAT_DTYPES, ensure_all_finite=ensure_all_finite, copy=self.copy)\n self._fit_X = X\n self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)\n self._valid_mask = ~np.all(self._mask_fit_X, axis=0)\n super()._fit_indicator(self._mask_fit_X)\n return self", + "docstring": "Fit the imputer on X. 
Parameters ---------- X : array-like shape of (n_samples, n_features) Input data, where is the number of samples and is the number of features. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object The fitted class instance.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\impute\\_knn.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg If Call Assign Assign Assign Call Assign Assign Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "find_lint_bot_comments", + "source_code": "def find_lint_bot_comments(repo, token, pr_number):\n response = requests.get(f'https://api.github.com/repos/{repo}/issues/{pr_number}/comments', headers=get_headers(token))\n response.raise_for_status()\n all_comments = response.json()\n failed_comment = '❌ Linting issues'\n success_comment = '✔️ Linting Passed'\n comments = [comment for comment in all_comments if comment['user']['login'] == 'github-actions[bot]' and (failed_comment in comment['body'] or success_comment in comment['body'])]\n if len(all_comments) > 25 and (not comments):\n raise RuntimeError('Comment not found in the first 30 comments.')\n return comments[0] if comments else None", + "docstring": "Get the comment from the linting bot.", + "type": "function", + "file_path": "scikit-learn\\build_tools\\get_comment.py", + "ast_data": "FunctionDef name:find_lint_bot_comments arg:repo arg:token arg:pr_number arguments arg arg arg Assign Call Call Call Assign Call Assign Assign Assign BoolOp Compare BoolOp Compare Compare If BoolOp Compare Call Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "coverage_experiment", + "source_code": "def coverage_experiment(args, model_iter_fn, model, example_inputs):\n profiler = Profiler()\n frozen_model_iter_fn = torch._dynamo.run(model_iter_fn)\n with profiler.prof:\n frozen_model_iter_fn(model, example_inputs)\n coverage_result = profiler.results()\n write_outputs(output_filename, ('dev', 'name', 'batch_size', 'graphs', 'graph_calls', 'captured_ops', 'total_ops', 'pct_ops', 'pct_time'), [current_device, current_name, current_batch_size] + coverage_result.tocsv())\n return coverage_result", + "docstring": "Test operator/model coverage of TorchDynamo and record statistics taken from a profiler. This target is mainly intended to check correctness. 
Writes to ./coverage.csv", + "type": "function", + "file_path": "pytorch\\benchmarks\\dynamo\\common.py", + "ast_data": "FunctionDef name:coverage_experiment arg:args arg:model_iter_fn arg:model arg:example_inputs arguments arg arg arg arg Assign Call Assign Call With Call Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "schedule", + "source_code": "def schedule(snode):\n scheduled.append(snode)\n for buf_name in snode.get_buffer_names():\n for snode in buffer_users[buf_name]:\n unmet_deps[snode].remove(buf_name)\n if len(unmet_deps[snode]) == 0:\n heapq.heappush(ready, Runnable(snode))", + "docstring": "Schedules and put all unblocked nodes onto the ready queue.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\comms.py", + "ast_data": "FunctionDef name:schedule arg:snode arguments arg Call For Call For Call If Compare Call Call Call" + }, + { + "library": "scikit-learn", + "name": "_extract_missing", + "source_code": "def _extract_missing(values):\n missing_values_set = {value for value in values if value is None or is_scalar_nan(value)}\n if not missing_values_set:\n return (values, MissingValues(nan=False, none=False))\n if None in missing_values_set:\n if len(missing_values_set) == 1:\n output_missing_values = MissingValues(nan=False, none=True)\n else:\n output_missing_values = MissingValues(nan=True, none=True)\n else:\n output_missing_values = MissingValues(nan=True, none=False)\n output = values - missing_values_set\n return (output, output_missing_values)", + "docstring": "Extract missing values from . Parameters ---------- values: set Set of values to extract missing from. Returns ------- output: set Set with missing values extracted. missing_values: MissingValues Object with missing value information.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_encode.py", + "ast_data": "FunctionDef name:_extract_missing arg:values arguments arg Assign BoolOp Compare Call If Return return:yes Call If Compare If Compare Call Assign Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "cplx01_f", + "source_code": "def cplx01_f(z, n, a):\n return z ** n - a", + "docstring": "z**n-a: Use to find the nth root of a", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_tstutils.py", + "ast_data": "FunctionDef name:cplx01_f arg:z arg:n arg:a arguments arg arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "nodes", + "source_code": "@property\ndef nodes(self) -> _node_list:\n return _node_list(self)", + "docstring": "Get the list of Nodes that constitute this Graph. 
Note that this `` can be called on this list to switch iteration order.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\graph.py", + "ast_data": "FunctionDef name:nodes arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_default_initializer", + "source_code": "def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):\n del shape\n if dtype.is_floating:\n initializer = init_ops.glorot_uniform_initializer()\n initializing_from_value = False\n elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool or (dtype == dtypes.string):\n initializer = init_ops.zeros_initializer()\n initializing_from_value = False\n else:\n raise ValueError('An initializer for variable %s of %s is required' % (name, dtype.base_dtype))\n return (initializer, initializing_from_value)", + "docstring": "Provide a default initializer and a corresponding value. Args: name: see get_variable. shape: see get_variable. dtype: see get_variable. Returns: initializer and initializing_from_value. See get_variable above. Raises: ValueError: When giving unsupported dtype.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\variable_scope_shim.py", + "ast_data": "FunctionDef name:_get_default_initializer arg:self arg:name arg:shape arg:dtype arguments arg arg arg arg If Assign Call Assign If BoolOp Compare Assign Call Assign Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_node_get", + "source_code": "def _node_get(node: torch._C.Node, key: str):\n sel = node.kindOf(key)\n return getattr(node, sel)(key)", + "docstring": "Get attributes of a node which is polymorphic over return type.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\tensorboard\\_pytorch_graph.py", + "ast_data": "FunctionDef name:_node_get arg:node arg:key arguments arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "ConnectionWrapper", + "source_code": "class ConnectionWrapper:\n\n def __init__(self, conn):\n self.conn = conn\n\n def send(self, obj):\n buf = io.BytesIO()\n ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj)\n self.send_bytes(buf.getvalue())\n\n def recv(self):\n buf = self.recv_bytes()\n return pickle.loads(buf)\n\n def __getattr__(self, name):\n if 'conn' in self.__dict__:\n return getattr(self.conn, name)\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute 'conn'\")", + "docstring": "Proxy class for _multiprocessing.Connection which uses ForkingPickler for object serialization.", + "type": "class", + "file_path": "pytorch\\torch\\multiprocessing\\queue.py", + "ast_data": "ClassDef name:ConnectionWrapper FunctionDef name:__init__ arg:self arg:conn arguments arg arg Assign FunctionDef name:send arg:self arg:obj arguments arg arg Assign Call Call Call Call Call FunctionDef name:recv arg:self arguments arg Assign Call Return return:yes Call FunctionDef name:__getattr__ arg:self arg:name arguments arg arg If Compare Return return:yes Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "_generate_square_subsequent_mask", + "source_code": "def _generate_square_subsequent_mask(sz: int, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor:\n return torch.triu(torch.full((sz, sz), float('-inf'), dtype=dtype, device=device), diagonal=1)", + "docstring": "Generate a square causal mask for the sequence. The masked positions are filled with float('-inf'). 
Unmasked positions are filled with float(0.0).", + "type": "function", + "file_path": "pytorch\\torch\\nn\\modules\\transformer.py", + "ast_data": "FunctionDef name:_generate_square_subsequent_mask arg:sz arg:device arg:dtype arguments arg arg arg Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "_put_val_to_limits", + "source_code": "def _put_val_to_limits(a, limits, inclusive, val=np.nan, xp=None):\n xp = array_namespace(a) if xp is None else xp\n mask = xp.zeros_like(a, dtype=xp.bool)\n if limits is None:\n return (a, mask)\n lower_limit, upper_limit = limits\n lower_include, upper_include = inclusive\n if lower_limit is not None:\n mask |= a < lower_limit if lower_include else a <= lower_limit\n if upper_limit is not None:\n mask |= a > upper_limit if upper_include else a >= upper_limit\n lazy = is_lazy_array(mask)\n if not lazy and xp.all(mask):\n raise ValueError('No array values within given limits')\n if lazy or xp.any(mask):\n a = xp.where(mask, val, a)\n return (a, mask)", + "docstring": "Replace elements outside limits with a value. This is primarily a utility function. Parameters ---------- a : array limits : (float or None, float or None) A tuple consisting of the (lower limit, upper limit). Elements in the input array less than the lower limit or greater than the upper limit will be replaced with . None implies no limit. inclusive : (bool, bool) A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to lower or upper are allowed. val : float, default: NaN The value with which extreme elements of the array are replaced.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:_put_val_to_limits arg:a arg:limits arg:inclusive arg:val arg:xp arguments arg arg arg arg arg Assign Compare Call Assign Call If Compare Return return:yes Assign Assign If Compare Compare Compare If Compare Compare Compare Assign Call If BoolOp Call Raise Call If BoolOp Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "display_benchmark_results", + "source_code": "def display_benchmark_results(timer_list, metric_name):\n mean_time = statistics.mean(timer_list)\n stdev_time = statistics.stdev(timer_list)\n stdev_time_percentage = stdev_time / mean_time * 100\n print('%s: %.2f ms ± %.2f%%' % (metric_name, mean_time, stdev_time_percentage))", + "docstring": "Display mean and stdev for a given metric.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\cpu\\benchmarks\\e2e\\gemma2\\pytorch_2b\\benchmark.py", + "ast_data": "FunctionDef name:display_benchmark_results arg:timer_list arg:metric_name arguments arg arg Assign Call Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "_maybe_change_to_function_call", + "source_code": "def _maybe_change_to_function_call(self, parent, node, full_name):\n if full_name in self._api_change_spec.change_to_function:\n if not isinstance(parent, ast.Call):\n new_node = ast.Call(node, [], [])\n pasta.ast_utils.replace_child(parent, node, new_node)\n ast.copy_location(new_node, node)\n self.add_log(INFO, node.lineno, node.col_offset, 'Changed %r to a function call' % full_name)\n return True\n return False", + "docstring": "Wraps node (typically, an Attribute or Expr) in a Call.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py", + "ast_data": "FunctionDef name:_maybe_change_to_function_call arg:self arg:parent arg:node 
arg:full_name arguments arg arg arg arg If Compare If Call Assign Call Call Call Call Return return:yes Return return:yes" + }, + { + "library": "cherrypy", + "name": "HelloWorld", + "source_code": "class HelloWorld:\n\n @cherrypy.expose\n def index(self):\n return 'We have an important message for you!'\n\n @cherrypy.expose\n def show_msg(self):\n return 'Hello world!'", + "docstring": "Hello world app.", + "type": "class", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut02_expose_methods.py", + "ast_data": "ClassDef name:HelloWorld FunctionDef name:index arg:self arguments arg Return return:yes FunctionDef name:show_msg arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_normalize_start_end", + "source_code": "def _normalize_start_end(x: Tensor, dim: int, start: Optional[int], end: Optional[int]) -> tuple[int, int]:\n dim_size = x.shape[dim]\n\n def clamp_wrap(val, lower, upper, default) -> int:\n if val is None:\n return default\n if val < 0:\n val = val + dim_size\n return min(max(val, lower), upper)\n start = clamp_wrap(start, 0, dim_size, 0)\n end = clamp_wrap(end, start, dim_size, dim_size)\n return (start, end)", + "docstring": "Normalize start and end such that both are in the range [0, x.get_size()[dim]] and start <= end.", + "type": "function", + "file_path": "pytorch\\torch\\_decomp\\decompositions.py", + "ast_data": "FunctionDef name:_normalize_start_end arg:x arg:dim arg:start arg:end arguments arg arg arg arg Assign FunctionDef name:clamp_wrap arg:val arg:lower arg:upper arg:default arguments arg arg arg arg If Compare Return return:yes If Compare Assign Return return:yes Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "pol2cart", + "source_code": "def pol2cart(rho: Tensor, phi: Tensor) -> tuple[Tensor, Tensor]:\n if not isinstance(rho, Tensor) & isinstance(phi, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(rho)}, {type(phi)}')\n x = rho * cos(phi)\n y = rho * sin(phi)\n return (x, y)", + "docstring": "Convert polar coordinates to cartesian coordinates. Args: rho: Tensor of arbitrary shape. phi: Tensor of same arbitrary shape. Returns: - x: Tensor with same shape as input. - y: Tensor with same shape as input. 
Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi)", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:pol2cart arg:rho arg:phi arguments arg arg If Call Call Raise Call Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_nvml_device_index", + "source_code": "def _get_nvml_device_index(device: Optional[Union[int, Device]]) -> int:\n idx = _get_device_index(device, optional=True)\n visible_devices = _parse_visible_devices()\n if type(visible_devices[0]) is str:\n uuids = _raw_device_uuid_nvml()\n if uuids is None:\n raise RuntimeError(\"Can't get device UUIDs\")\n visible_devices = _transform_uuid_to_ordinals(cast(list[str], visible_devices), uuids)\n visible_devices = cast(list[int], visible_devices)\n if idx < 0 or idx >= len(visible_devices):\n raise RuntimeError(f'device {idx} is not visible (CUDA_VISIBLE_DEVICES={visible_devices})')\n return visible_devices[idx]", + "docstring": "Return the NVML index of the device, taking CUDA_VISIBLE_DEVICES into account.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:_get_nvml_device_index arg:device arguments arg Assign Call Assign Call If Compare Call Assign Call If Compare Raise Call Assign Call Call Assign Call If BoolOp Compare Compare Call Raise Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_check_inverse_transform", + "source_code": "def _check_inverse_transform(self, X):\n idx_selected = slice(None, None, max(1, X.shape[0] // 100))\n X_round_trip = self.inverse_transform(self.transform(X[idx_selected]))\n if hasattr(X, 'dtype'):\n dtypes = [X.dtype]\n elif hasattr(X, 'dtypes'):\n dtypes = X.dtypes\n if not all((np.issubdtype(d, np.number) for d in dtypes)):\n raise ValueError(\"'check_inverse' is only supported when all the elements in `X` is numerical.\")\n if not _allclose_dense_sparse(X[idx_selected], X_round_trip):\n warnings.warn(\"The provided functions are not strictly inverse of each other. If you are sure you want to proceed regardless, set 'check_inverse=False'.\", UserWarning)", + "docstring": "Check that func and inverse_func are the inverse.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_function_transformer.py", + "ast_data": "FunctionDef name:_check_inverse_transform arg:self arg:X arguments arg arg Assign Call Call Assign Call Call If Call Assign If Call Assign If Call Call Raise Call If Call Call" + }, + { + "library": "scipy", + "name": "rk_step", + "source_code": "def rk_step(fun, t, y, f, h, A, B, C, K):\n K[0] = f\n for s, (a, c) in enumerate(zip(A[1:], C[1:]), start=1):\n dy = np.dot(K[:s].T, a[:s]) * h\n K[s] = fun(t + c * h, y + dy)\n y_new = y + h * np.dot(K[:-1].T, B)\n f_new = fun(t + h, y_new)\n K[-1] = f_new\n return (y_new, f_new)", + "docstring": "Perform a single Runge-Kutta step. This function computes a prediction of an explicit Runge-Kutta method and also estimates the error of a less accurate method. Notation for Butcher tableau is as in [1]_. Parameters ---------- fun : callable Right-hand side of the system. t : float Current time. y : ndarray, shape (n,) Current state. f : ndarray, shape (n,) Current value of the derivative, i.e., ``. References ---------- .. [1] E. Hairer, S. P. Norsett G. Wanner, \"Solving Ordinary Differential Equations I: Nonstiff Problems\", Sec. 
II.4.", + "type": "function", + "file_path": "scipy\\scipy\\integrate\\_ivp\\rk.py", + "ast_data": "FunctionDef name:rk_step arg:fun arg:t arg:y arg:f arg:h arg:A arg:B arg:C arg:K arguments arg arg arg arg arg arg arg arg arg Assign For Call Call Assign Call Assign Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "_get_rgba_and_mask", + "source_code": "def _get_rgba_and_mask(self, X, alpha=None, bytes=False):\n if not self._isinit:\n self._init()\n xa = np.array(X, copy=True)\n if not xa.dtype.isnative:\n xa = xa.byteswap().view(xa.dtype.newbyteorder())\n if xa.dtype.kind == 'f':\n xa *= self.N\n xa[xa == self.N] = self.N - 1\n mask_under = xa < 0\n mask_over = xa >= self.N\n mask_bad = X.mask if np.ma.is_masked(X) else np.isnan(xa)\n with np.errstate(invalid='ignore'):\n xa = xa.astype(int)\n xa[mask_under] = self._i_under\n xa[mask_over] = self._i_over\n xa[mask_bad] = self._i_bad\n lut = self._lut\n if bytes:\n lut = (lut * 255).astype(np.uint8)\n rgba = lut.take(xa, axis=0, mode='clip')\n if alpha is not None:\n alpha = np.clip(alpha, 0, 1)\n if bytes:\n alpha *= 255\n if alpha.shape not in [(), xa.shape]:\n raise ValueError(f'alpha is array-like but its shape {alpha.shape} does not match that of X {xa.shape}')\n rgba[..., -1] = alpha\n if (lut[-1] == 0).all():\n rgba[mask_bad] = (0, 0, 0, 0)\n return (rgba, mask_bad)", + "docstring": "Parameters ---------- X : float or int or array-like The data value(s) to convert to RGBA. For floats, *X* should be in the interval `numpy.uint8` or masked.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:_get_rgba_and_mask arg:self arg:X arg:alpha arg:bytes arguments arg arg arg arg If Call Assign Call If Assign Call Call Call If Compare Assign Compare Assign Compare Assign Compare Assign Call Call With Call Assign Call Assign Assign Assign Assign If Assign Call Assign Call If Compare Assign Call If If Compare Raise Call Assign If Call Compare Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "make_s_curve", + "source_code": "@validate_params({'n_samples': [Interval(Integral, 1, None, closed='left')], 'noise': [Interval(Real, 0, None, closed='left')], 'random_state': ['random_state']}, prefer_skip_nested_validation=True)\ndef make_s_curve(n_samples=100, *, noise=0.0, random_state=None):\n generator = check_random_state(random_state)\n t = 3 * np.pi * (generator.uniform(size=(1, n_samples)) - 0.5)\n X = np.empty(shape=(n_samples, 3), dtype=np.float64)\n X[:, 0] = np.sin(t)\n X[:, 1] = 2.0 * generator.uniform(size=n_samples)\n X[:, 2] = np.sign(t) * (np.cos(t) - 1)\n X += noise * generator.standard_normal(size=(3, n_samples)).T\n t = np.squeeze(t)\n return (X, t)", + "docstring": "Generate an S curve dataset. Read more in the :ref:. Parameters ---------- n_samples : int, default=100 The number of sample points on the S curve. noise : float, default=0.0 The standard deviation of the gaussian noise. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:. Returns ------- X : ndarray of shape (n_samples, 3) The points. t : ndarray of shape (n_samples,) The univariate position of the sample according to the main dimension of the points in the manifold. 
Examples -------- >>> from sklearn.datasets import make_s_curve >>> X, t = make_s_curve(noise=0.05, random_state=0) >>> X.shape (100, 3) >>> t.shape (100,)", + "type": "function", + "file_path": "scikit-learn\\sklearn\\datasets\\_samples_generator.py", + "ast_data": "FunctionDef name:make_s_curve arg:n_samples arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "make_dual", + "source_code": "def make_dual(tensor, tangent, *, level=None):\n _maybe_load_decompositions()\n if level is None:\n level = _current_level\n if level < 0:\n raise RuntimeError('Trying to create a dual Tensor for forward AD but no level exists, make sure to enter_dual_level() first.')\n if not (tensor.is_floating_point() or tensor.is_complex()):\n raise ValueError(f'Expected primal to be floating point or complex, but got: {tensor.dtype}')\n if not (tangent.is_floating_point() or tangent.is_complex()):\n raise ValueError(f'Expected tangent to be floating point or complex, but got: {tangent.dtype}')\n return torch._VF._make_dual(tensor, tangent, level=level)", + "docstring": "Associate a tensor value with its tangent to create a \"dual tensor\" for forward AD gradient computation. The result is a new tensor aliased to :attr: with :attr: embedded as an attribute as-is if it has the same storage layout or copied otherwise. The tangent attribute can be recovered with :func:. This function is backward differentiable. Given a function whose jacobian is , it allows one to compute the Jacobian-vector product () between and a given vector as follows. Example:: >>> # xdoctest: +SKIP(\"Undefined variables\") >>> with dual_level(): ... inp = make_dual(x, v) ... out = f(inp) ... y, jvp = unpack_dual(out) Please see the __ for detailed steps on how to use this API.", + "type": "function", + "file_path": "pytorch\\torch\\autograd\\forward_ad.py", + "ast_data": "FunctionDef name:make_dual arg:tensor arg:tangent arguments arg arg arg Call If Compare Assign If Compare Raise Call If BoolOp Call Call Raise Call If BoolOp Call Call Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "bessel_k1e", + "source_code": "@tf_export('math.special.bessel_k1e')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef bessel_k1e(x, name=None):\n with ops.name_scope(name, 'bessel_k1e', [x]):\n return gen_special_math_ops.bessel_k1e(x)", + "docstring": "Computes the Bessel k1e function of element-wise. Modified Bessel function of order 1. >>> tf.math.special.bessel_k1e([0.5, 1., 2., 4.]).numpy() array([2.73100971, 1.63615349, 1.03347685, 0.68157595], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . 
@compatibility(scipy) Equivalent to scipy.special.k1e @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py", + "ast_data": "FunctionDef name:bessel_k1e arg:x arg:name arguments arg arg With Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_create_tensor_maps", + "source_code": "def _create_tensor_maps(self):\n self.tensorname_to_cache_idx = {}\n self.cache_idx_to_tensor_idx = []\n for out_tensor in self.traced_tensors:\n tensor_name = out_tensor.name\n if tensor_name in self.tensorname_to_cache_idx:\n raise ValueError('Tensor name {} should not be already in tensorname_to_cache_idx'.format(tensor_name))\n if tensor_name not in self.graph_order.tensor_to_idx:\n raise ValueError('Tensor name {} is not in the tensor_to_idx, tensor_to_idx={} '.format(tensor_name, self.graph_order.tensor_to_idx))\n tensor_idx = self.graph_order.tensor_to_idx[tensor_name]\n cache_idx = len(self.tensorname_to_cache_idx)\n self.tensorname_to_cache_idx[tensor_name] = cache_idx\n self.cache_idx_to_tensor_idx.append(tensor_idx)\n if len(self.tensorname_to_cache_idx) != len(self.cache_idx_to_tensor_idx):\n raise RuntimeError('len(self.tensorname_to_cache_idx) must equallen(self.cache_idx_to_tensor_idx), got len(self.tensorname_to_cache_idx)={}, len(self.cache_idx_to_tensor_idx)={}'.format(len(self.tensorname_to_cache_idx), len(self.cache_idx_to_tensor_idx)))", + "docstring": "Creates tensor to cache id maps.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py", + "ast_data": "FunctionDef name:_create_tensor_maps arg:self arguments arg Assign Assign For Assign If Compare Raise Call Call If Compare Raise Call Call Assign Assign Call Assign Call If Compare Call Call Raise Call Call Call Call" + }, + { + "library": "pandas", + "name": "_parse_latex_cell_styles", + "source_code": "def _parse_latex_cell_styles(latex_styles: CSSList, display_value: str, convert_css: bool=False) -> str:\n if convert_css:\n latex_styles = _parse_latex_css_conversion(latex_styles)\n for command, options in latex_styles[::-1]:\n formatter = {'--wrap': f'{{\\\\{command}--to_parse {display_value}}}', '--nowrap': f'\\\\{command}--to_parse {display_value}', '--lwrap': f'{{\\\\{command}--to_parse}} {display_value}', '--rwrap': f'\\\\{command}--to_parse{{{display_value}}}', '--dwrap': f'{{\\\\{command}--to_parse}}{{{display_value}}}'}\n display_value = f'\\\\{command}{options} {display_value}'\n for arg in ['--nowrap', '--wrap', '--lwrap', '--rwrap', '--dwrap']:\n if arg in str(options):\n display_value = formatter[arg].replace('--to_parse', _parse_latex_options_strip(value=options, arg=arg))\n break\n return display_value", + "docstring": "Mutate the `[('c1', 'o1'), ('c2', 'o2')]\\c1o1{\\c2o2{display_value}}--rwrap\\{}--wrap{\\ }--nowrap\\ --lwrap{\\} --dwrap{\\}{}[('c1', 'o1--wrap'), ('c2', 'o2')]{\\c1o1 \\c2o2{display_value}}", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\style_render.py", + "ast_data": "FunctionDef name:_parse_latex_cell_styles arg:latex_styles arg:display_value arg:convert_css arguments arg arg arg If Assign Call For Assign Assign For If Compare Call Assign Call Call Return return:yes" + }, + { + "library": "cryptography", + "name": "__eq__", + "source_code": "@abc.abstractmethod\ndef __eq__(self, other: object) -> bool:\n pass", + "docstring": "Checks equality.", + "type": "method", + "file_path": 
"cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x448.py", + "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg" + }, + { + "library": "scikit-learn", + "name": "InvalidVersion", + "source_code": "class InvalidVersion(ValueError):\n pass", + "docstring": "An invalid version was found, users should refer to PEP 440.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\externals\\_packaging\\version.py", + "ast_data": "ClassDef name:InvalidVersion" + }, + { + "library": "tensorflow", + "name": "_has_mutation_or_trackable", + "source_code": "def _has_mutation_or_trackable(self):\n if self._non_append_mutation:\n return True\n return any((isinstance(element, base.Trackable) for element in self._storage))", + "docstring": "Short-circuits a check for trackables if there's already a mutation.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py", + "ast_data": "FunctionDef name:_has_mutation_or_trackable arg:self arguments arg If Return return:yes Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "TemplateBridge", + "source_code": "class TemplateBridge:\n\n def init(self, builder: Builder, theme: Theme | None=None, dirs: list[str] | None=None) -> None:\n msg = 'must be implemented in subclasses'\n raise NotImplementedError(msg)\n\n def newest_template_mtime(self) -> float:\n return 0\n\n def render(self, template: str, context: dict[str, Any]) -> None:\n msg = 'must be implemented in subclasses'\n raise NotImplementedError(msg)\n\n def render_string(self, template: str, context: dict[str, Any]) -> str:\n msg = 'must be implemented in subclasses'\n raise NotImplementedError(msg)", + "docstring": "This class defines the interface for a \"template bridge\", that is, a class that renders templates given a template name and a context.", + "type": "class", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "ClassDef name:TemplateBridge FunctionDef name:init arg:self arg:builder arg:theme arg:dirs arguments arg arg arg arg Assign Raise Call FunctionDef name:newest_template_mtime arg:self arguments arg Return return:yes FunctionDef name:render arg:self arg:template arg:context arguments arg arg arg Assign Raise Call FunctionDef name:render_string arg:self arg:template arg:context arguments arg arg arg Assign Raise Call" + }, + { + "library": "scikit-learn", + "name": "_check_infrequent_enabled", + "source_code": "def _check_infrequent_enabled(self):\n max_categories = getattr(self, 'max_categories', None)\n min_frequency = getattr(self, 'min_frequency', None)\n self._infrequent_enabled = max_categories is not None and max_categories >= 1 or min_frequency is not None", + "docstring": "This functions checks whether _infrequent_enabled is True or False. 
This has to be called after parameter validation in the fit function.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py", + "ast_data": "FunctionDef name:_check_infrequent_enabled arg:self arguments arg Assign Call Assign Call Assign BoolOp BoolOp Compare Compare Compare" + }, + { + "library": "pytorch", + "name": "in_progress", + "source_code": "def in_progress(self) -> bool:\n return self._level > 0", + "docstring": "True if we've entered the context.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py", + "ast_data": "FunctionDef name:in_progress arg:self arguments arg Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "numel", + "source_code": "def numel(self):\n shape = self.shape\n\n def _prod(xs):\n return functools.reduce(operator.mul, xs, 1)\n return _prod(shape)", + "docstring": "Returns the number of elements (not accounting for sparsity) in the mask.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py", + "ast_data": "FunctionDef name:numel arg:self arguments arg Assign FunctionDef name:_prod arg:xs arguments arg Return return:yes Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit_transform", + "source_code": "def fit_transform(self, X, y=None):\n self.fit(X)\n return self.embedding_", + "docstring": "Fit the model from data in X and transform X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. If affinity is \"precomputed\" X : {array-like, sparse matrix} of shape (n_samples, n_samples), Interpret X as precomputed adjacency graph computed from samples. y : Ignored Not used, present for API consistency by convention. 
Returns ------- X_new : array-like of shape (n_samples, n_components) Spectral embedding of the training matrix.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\manifold\\_spectral_embedding.py", + "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_conv2d_expanded_batch", + "source_code": "def _conv2d_expanded_batch(input, filters, strides, padding, data_format, dilations, name):\n input_rank = input.shape.rank\n if input_rank is None or input_rank < 5:\n return gen_nn_ops.conv2d(input, filter=filters, strides=strides, padding=padding, data_format=data_format, dilations=dilations, name=name)\n return squeeze_batch_dims(input, functools.partial(gen_nn_ops.conv2d, filter=filters, strides=strides, padding=padding, data_format=data_format, dilations=dilations), inner_rank=3, name=name)", + "docstring": "Helper function for ; handles expanded batches.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:_conv2d_expanded_batch arg:input arg:filters arg:strides arg:padding arg:data_format arg:dilations arg:name arguments arg arg arg arg arg arg arg Assign If BoolOp Compare Compare Return return:yes Call Return return:yes Call Call" + }, + { + "library": "pygame", + "name": "get_layer_of_sprite", + "source_code": "def get_layer_of_sprite(self, sprite):\n return self._spritelayers.get(sprite, self._default_layer)", + "docstring": "return the layer that sprite is currently in If the sprite is not found, then it will return the default layer.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:get_layer_of_sprite arg:self arg:sprite arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "InaccessibleSourceCodeError", + "source_code": "class InaccessibleSourceCodeError(PyCTError, ValueError):\n pass", + "docstring": "Raised when inspect can not access source code.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\errors.py", + "ast_data": "ClassDef name:InaccessibleSourceCodeError" + }, + { + "library": "scipy", + "name": "dg_series", + "source_code": "def dg_series(z, n):\n k = symbols('k')\n return -1 / z - EulerGamma + sympy.summation((-1) ** k * zeta(k) * z ** (k - 1), (k, 2, n + 1))", + "docstring": "Symbolic expansion of digamma(z) in z=0 to order n. See and with", + "type": "function", + "file_path": "scipy\\scipy\\special\\_precompute\\wright_bessel.py", + "ast_data": "FunctionDef name:dg_series arg:z arg:n arguments arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_estimate_gaussian_covariances_spherical", + "source_code": "def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):\n return _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar).mean(1)", + "docstring": "Estimate the spherical variance values. 
Parameters ---------- responsibilities : array-like of shape (n_samples, n_components) X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) means : array-like of shape (n_components, n_features) reg_covar : float Returns ------- variances : array, shape (n_components,) The variance values of each components.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py", + "ast_data": "FunctionDef name:_estimate_gaussian_covariances_spherical arg:resp arg:X arg:nk arg:means arg:reg_covar arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "scale", + "source_code": "def scale(tensor: Tensor, scale_factor: Tensor, center: Union[None, Tensor]=None, mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=True) -> Tensor:\n if not isinstance(tensor, Tensor):\n raise TypeError(f'Input tensor type is not a Tensor. Got {type(tensor)}')\n if not isinstance(scale_factor, Tensor):\n raise TypeError(f'Input scale_factor type is not a Tensor. Got {type(scale_factor)}')\n if len(scale_factor.shape) == 1:\n scale_factor = scale_factor.repeat(1, 2)\n if center is None:\n center = _compute_tensor_center(tensor)\n center = center.expand(tensor.shape[0], -1)\n scale_factor = scale_factor.expand(tensor.shape[0], 2)\n scaling_matrix: Tensor = _compute_scaling_matrix(scale_factor, center)\n return affine(tensor, scaling_matrix[..., :2, :3], mode, padding_mode, align_corners)", + "docstring": "Scale the tensor by a factor. .. image:: _static/img/scale.png Args: tensor: The image tensor to be warped in shapes of :math:. scale_factor: The scale factor apply. The tensor must have a shape of (B) or (B, 2), where B is batch size. If (B), isotropic scaling will perform. If (B, 2), x-y-direction specific scaling will perform. center: The center through which to scale. The tensor must have a shape of (B, 2), where B is batch size and last dimension contains cx and cy. mode: interpolation mode to calculate output values ``. align_corners: interpolation flag. Returns: The scaled tensor with the same shape as the input. 
Example: >>> img = torch.rand(1, 3, 4, 4) >>> scale_factor = torch.tensor([[2., 2.]]) >>> out = scale(img, scale_factor) >>> print(out.shape) torch.Size([1, 3, 4, 4])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py", + "ast_data": "FunctionDef name:scale arg:tensor arg:scale_factor arg:center arg:mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg arg If Call Raise Call Call If Call Raise Call Call If Compare Call Assign Call If Compare Assign Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "huber_loss", + "source_code": "@register_decomposition(aten.huber_loss)\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('input', 'target'), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef huber_loss(input: TensorLikeType, target: TensorLikeType, reduction: Union[str, int]='mean', delta: float=1.0) -> TensorLikeType:\n if type(reduction) is int:\n reduction = _reduction_int_to_str(reduction)\n _check_reduction_value(reduction)\n torch._check(delta > 0, lambda: 'huber_loss does not support non-positive values for delta.')\n z = (input - target).abs()\n loss = torch.where(z < delta, 0.5 * z * z, delta * (z - 0.5 * delta))\n return _apply_loss_reduction(loss, reduction)", + "docstring": "Reference implementation of torch.nn.functional.huber_loss", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py", + "ast_data": "FunctionDef name:huber_loss arg:input arg:target arg:reduction arg:delta arguments arg arg arg arg If Compare Call Assign Call Call Call Compare arguments Assign Call Assign Call Compare Return return:yes Call Call Call Call" + }, + { + "library": "scrapy", + "name": "ContractFail", + "source_code": "class ContractFail(AssertionError):\n pass", + "docstring": "Error raised in case of a failing contract", + "type": "class", + "file_path": "scrapy\\scrapy\\exceptions.py", + "ast_data": "ClassDef name:ContractFail" + }, + { + "library": "tensorflow", + "name": "to_proto", + "source_code": "def to_proto(self, export_scope=None):\n raise NotImplementedError", + "docstring": "Converts a to a protocol buffer. Args: export_scope: Optional . Name scope to remove. 
Returns: A protocol buffer, or if the is not in the specified name scope.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:to_proto arg:self arg:export_scope arguments arg arg Raise" + }, + { + "library": "tensorflow", + "name": "StackTraceMapper", + "source_code": "class StackTraceMapper(StackTraceTransform):\n _stack_dict = _source_mapper_stacks\n\n def __init__(self):\n self.internal_map = _tf_stack.PyBindSourceMap()\n\n def update(self):\n self.internal_map.update_to(tuple(self.get_effective_source_map().items()))\n\n def get_effective_source_map(self):\n raise NotImplementedError('subclasses need to override this')", + "docstring": "Allows remapping traceback information to different source code.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_stack.py", + "ast_data": "ClassDef name:StackTraceMapper Assign FunctionDef name:__init__ arg:self arguments arg Assign Call FunctionDef name:update arg:self arguments arg Call Call Call Call FunctionDef name:get_effective_source_map arg:self arguments arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "decision_function", + "source_code": "@available_if(_estimator_has('decision_function'))\ndef decision_function(self, X):\n _check_is_fitted(self)\n estimator = getattr(self, 'estimator_', self.estimator)\n return estimator.decision_function(X)", + "docstring": "Decision function for samples in using the fitted estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. Returns ------- decisions : ndarray of shape (n_samples,) The decision function computed the fitted estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py", + "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "captured_inputs", + "source_code": "@property\ndef captured_inputs(self):\n return nest.flatten([x() if callable(x) else x for x in self._captured_inputs], expand_composites=True)", + "docstring": "Returns external Tensors captured by this function. self.__call__(*args) passes to the function.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "FunctionDef name:captured_inputs arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "pygame", + "name": "set_timing_threshold", + "source_code": "def set_timing_threshold(self, time_ms):\n if isinstance(time_ms, (int, float)):\n self._time_threshold = time_ms\n else:\n raise TypeError(f'Expected numeric value, got {time_ms.__class__.__name__} instead')", + "docstring": "set the threshold in milliseconds set_timing_threshold(time_ms): return None Defaults to 1000.0 / 80.0. This means that the screen will be painted using the flip method rather than the update method if the update method is taking so long to update the screen that the frame rate falls below 80 frames per second. 
Raises TypeError if time_ms is not int or float.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:set_timing_threshold arg:self arg:time_ms arguments arg arg If Call Assign Raise Call" + }, + { + "library": "scikit-learn", + "name": "LinearClassifierMixin", + "source_code": "class LinearClassifierMixin(ClassifierMixin):\n\n def decision_function(self, X):\n check_is_fitted(self)\n xp, _ = get_namespace(X)\n X = validate_data(self, X, accept_sparse='csr', reset=False)\n scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_\n return xp.reshape(scores, (-1,)) if scores.ndim > 1 and scores.shape[1] == 1 else scores\n\n def predict(self, X):\n xp, _ = get_namespace(X)\n scores = self.decision_function(X)\n if len(scores.shape) == 1:\n indices = xp.astype(scores > 0, indexing_dtype(xp))\n else:\n indices = xp.argmax(scores, axis=1)\n return xp.take(self.classes_, indices, axis=0)\n\n def _predict_proba_lr(self, X):\n prob = self.decision_function(X)\n expit(prob, out=prob)\n if prob.ndim == 1:\n return np.vstack([1 - prob, prob]).T\n else:\n prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))\n return prob", + "docstring": "Mixin for linear classifiers. Handles prediction for sparse and dense X.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py", + "ast_data": "ClassDef name:LinearClassifierMixin FunctionDef name:decision_function arg:self arg:X arguments arg arg Call Assign Call Assign Call Assign Call Return return:yes BoolOp Compare Compare Call FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Assign Call If Compare Call Assign Call Compare Call Assign Call Return return:yes Call FunctionDef name:_predict_proba_lr arg:self arg:X arguments arg arg Assign Call Call If Compare Return return:yes Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "single_slice_dim", + "source_code": "def single_slice_dim(self, shape):\n if not isinstance(shape, (tuple, list)):\n raise TypeError('`shape` must be a sequence (like tuple or list) instead of ' + type(shape).__name__)\n if len(shape) != len(self.full_shape):\n raise ValueError('Expected equal length, but received shape={} of length {} while self.full_shape={} is of length {}.'.format(shape, len(shape), self.full_shape, len(self.full_shape)))\n for i in range(len(shape)):\n if self.var_offset[i] + shape[i] > self.full_shape[i]:\n raise ValueError('With self.var_offset={}, a partition of shape={} would exceed self.full_shape={} in dimension {}.'.format(self.var_offset, shape, self.full_shape, i))\n slice_dim = None\n for i in range(len(shape)):\n if shape[i] == self.full_shape[i]:\n continue\n if slice_dim is not None:\n raise ValueError('Cannot use single_slice_dim() with shape={} and self.full_shape={} since slice dim could be either dimension {} or {}.'.format(shape, self.full_shape, i, slice_dim))\n slice_dim = i\n return slice_dim", + "docstring": "Returns the slice dim when the variable is partitioned only in one dim. Args: shape: Tuple or list of indicating the shape of one specific variable partition. Returns: representing the dimension that the variable is partitioned in, or if the variable doesn't seem to be partitioned at all. Raises: TypeError: If is not a sequence. ValueError: If is not the same length as . 
If the variable is partitioned in more than one dimension.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py", + "ast_data": "FunctionDef name:single_slice_dim arg:self arg:shape arguments arg arg If Call Raise Call Call If Compare Call Call Raise Call Call Call Call For Call Call If Compare Raise Call Call Assign For Call Call If Compare If Compare Raise Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, buckets):\n self.buckets = buckets", + "docstring": "Creates a new Buckets. Args: buckets: A c pointer of TFE_MonitoringBuckets.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:buckets arguments arg arg Assign" + }, + { + "library": "pygame", + "name": "pump", + "source_code": "def pump():\n _ft_init_check()\n pygame.event.pump()", + "docstring": "pump() -> None internally process pygame event handlers", + "type": "function", + "file_path": "pygame\\src_py\\fastevent.py", + "ast_data": "FunctionDef name:pump arguments Call Call" + }, + { + "library": "tensorflow", + "name": "gather_index", + "source_code": "@property\n@abc.abstractmethod\ndef gather_index(self):\n pass", + "docstring": "Returns a 1D tensor. The size of the 1D tensor is equal to the destination size. The ith element of the result is the index of the source of the ith element.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:gather_index arg:self arguments arg" + }, + { + "library": "scipy", + "name": "CL_scaling_vector", + "source_code": "def CL_scaling_vector(x, g, lb, ub):\n v = np.ones_like(x)\n dv = np.zeros_like(x)\n mask = (g < 0) & np.isfinite(ub)\n v[mask] = ub[mask] - x[mask]\n dv[mask] = -1\n mask = (g > 0) & np.isfinite(lb)\n v[mask] = x[mask] - lb[mask]\n dv[mask] = 1\n return (v, dv)", + "docstring": "Compute Coleman-Li scaling vector and its derivatives. Components of a vector v are defined as follows:: | ub[i] - x[i], if g[i] 0 and lb[i] > -np.inf | 1, otherwise According to this definition v[i] >= 0 for all i. It differs from the definition in paper [1]_ (eq. (2.2)), where the absolute value of v is used. Both definitions are equivalent down the line. Derivatives of v with respect to x take value 1, -1 or 0 depending on a case. Returns ------- v : ndarray with shape of x Scaling vector. dv : ndarray with shape of x Derivatives of v[i] with respect to x[i], diagonal elements of v's Jacobian. References ---------- .. [1] M.A. Branch, T.F. Coleman, and Y. Li, \"A Subspace, Interior, and Conjugate Gradient Method for Large-Scale Bound-Constrained Minimization Problems,\" SIAM Journal on Scientific Computing, Vol. 
21, Number 1, pp 1-23, 1999.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py", + "ast_data": "FunctionDef name:CL_scaling_vector arg:x arg:g arg:lb arg:ub arguments arg arg arg arg Assign Call Assign Call Assign Compare Call Assign Assign Assign Compare Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_make_dataset_iterator", + "source_code": "def _make_dataset_iterator(self, dataset):\n input_context = self._make_input_context()\n return input_lib_v1.DatasetIterator(dataset, self._input_workers, self._container_strategy(), num_replicas_in_sync=self._num_replicas_in_sync, input_context=input_context)", + "docstring": "Distributes the dataset to each local GPU.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py", + "ast_data": "FunctionDef name:_make_dataset_iterator arg:self arg:dataset arguments arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "desc_signature_line", + "source_code": "class desc_signature_line(nodes.Part, nodes.Inline, nodes.FixedTextElement):\n sphinx_line_type = ''", + "docstring": "Node for a line in a multi-line object signature. It should only be used as a child of a :py:class: with `` for the line that should get the permalink.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:desc_signature_line Assign" + }, + { + "library": "cherrypy", + "name": "script_name", + "source_code": "@property\ndef script_name(self):\n if self._script_name is not None:\n return self._script_name\n return cherrypy.serving.request.wsgi_environ['SCRIPT_NAME'].rstrip('/')", + "docstring": "The URI \"mount point\" for this app. A mount point is that portion of the URI which is constant for all URIs that are serviced by this application; it does not include scheme, host, or proxy (\"virtual host\") portions of the URI. For example, if script_name is \"/my/cool/app\", then the URL \" might be handled by a \"page1\" method on the root object. The value of script_name MUST NOT end in a slash. If the script_name refers to the root of the URI, it MUST be an empty string (not \"/\"). If script_name is explicitly set to None, then the script_name will be provided for each call from request.wsgi_environ['SCRIPT_NAME'].", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cptree.py", + "ast_data": "FunctionDef name:script_name arg:self arguments arg If Compare Return return:yes Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_gen_axes_spines", + "source_code": "def _gen_axes_spines(self, locations=None, offset=0.0, units='inches'):\n return {side: mspines.Spine.linear_spine(self, side) for side in ['left', 'right', 'bottom', 'top']}", + "docstring": "Returns ------- dict Mapping of spine names to or instances that are used to draw Axes spines. In the standard Axes, spines are single line segments, but in other projections they may not be. 
Notes ----- Intended to be overridden by new projection types.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:_gen_axes_spines arg:self arg:locations arg:offset arg:units arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "retrieve_from_web", + "source_code": "def retrieve_from_web(generate_csv=False):\n url = 'https://developer.nvidia.com/cuda-gpus'\n source = urllib.request.urlopen(url)\n matches = []\n while True:\n line = source.readline()\n if '' in line:\n break\n else:\n gpu = re.search('([\\\\w\\\\S\\\\s\\\\d\\\\[\\\\]\\\\,]+[^*])(.*', line)\n if gpu:\n matches.append(gpu.group(1))\n elif capability:\n if capability.group(3):\n capability_str = capability.group(4) + '.' + capability.group(6)\n else:\n capability_str = capability.group(1) + '.' + capability.group(2)\n matches.append(capability_str)\n return create_gpu_capa_map(matches, generate_csv)", + "docstring": "Retrieves list of all CUDA compute capability from NVIDIA webpage. Args: generate_csv: Boolean for generating an output file containing the results. Returns: OrderedDict that is a list of all CUDA compute capability listed on the NVIDIA page. Order goes from top to bottom of the webpage content (.html).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\data\\cuda_compute_capability.py", + "ast_data": "FunctionDef name:retrieve_from_web arg:generate_csv arguments arg Assign Assign Call Assign While Assign Call If Compare Assign Call Assign Call If Call Call If If Call Assign Call Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_vertical_stem_width", + "source_code": "def get_vertical_stem_width(self):\n return self._header.get(b'StdVW', None)", + "docstring": "Return the standard vertical stem width as float, or *None* if not specified in AFM file.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_afm.py", + "ast_data": "FunctionDef name:get_vertical_stem_width arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_tf_equal", + "source_code": "def _tf_equal(a, b):\n return gen_math_ops.equal(a, b)", + "docstring": "Overload of \"equal\" for Tensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py", + "ast_data": "FunctionDef name:_tf_equal arg:a arg:b arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_samples", + "source_code": "def get_samples(self):\n if not self.get_size() or not self.batch_size():\n return None\n total_sample = self.get_size() * self.batch_size()\n if self.has_partial_batch():\n total_sample -= self.batch_size() - self.partial_batch_size()\n return total_sample", + "docstring": "Returns number of samples in the data, or .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", + "ast_data": "FunctionDef name:get_samples arg:self arguments arg If BoolOp Call Call Return return:no Assign Call Call If Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_has_precomputed_value_rowids", + "source_code": "def _has_precomputed_value_rowids(self):\n return self._value_rowids is not None", + "docstring": "Returns true if has already been computed. 
If true, then will return its value without calling any TensorFlow ops.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:_has_precomputed_value_rowids arg:self arguments arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "get_classes", + "source_code": "def get_classes(tensors):\n return nest.pack_sequence_as(tensors, [sparse_tensor.SparseTensor if isinstance(tensor, sparse_tensor.SparseTensor) else tensor_lib.Tensor for tensor in nest.flatten(tensors)])", + "docstring": "Gets classes for a structure of tensors. Args: tensors: the tensor structure to get classes for. Returns: a structure matching the nested structure of , containing at positions where contains a sparse tensor and otherwise.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\util\\sparse.py", + "ast_data": "FunctionDef name:get_classes arg:tensors arguments arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "log_sigmoid", + "source_code": "@tf_export('math.log_sigmoid', v1=['math.log_sigmoid', 'log_sigmoid'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('log_sigmoid')\ndef log_sigmoid(x, name=None):\n with ops.name_scope(name, 'LogSigmoid', [x]) as name:\n x = ops.convert_to_tensor(x, name='x')\n return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name)", + "docstring": "Computes log sigmoid of element-wise. Specifically, . For numerical stability, we use . Args: x: A Tensor with type or . name: A name for the operation (optional). Returns: A Tensor with the same type as . Usage Example: If a positive number is large, then its log_sigmoid will approach to 0 since the formula will be which approximates to which is 0. >>> x = tf.constant([0.0, 1.0, 50.0, 100.0]) >>> tf.math.log_sigmoid(x) If a negative number is large, its log_sigmoid will approach to the number itself since the formula will be which is which approximates to that is the number itself. 
>>> x = tf.constant([-100.0, -50.0, -1.0, 0.0]) >>> tf.math.log_sigmoid(x)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:log_sigmoid arg:x arg:name arguments arg arg With Call Assign Call Return return:yes Call Call Call Call" + }, + { + "library": "pygame", + "name": "use_arraytype", + "source_code": "def use_arraytype(arraytype):\n warnings.warn(DeprecationWarning('only numpy arrays are now supported, this function will be removed in a future version of the module'))\n arraytype = arraytype.lower()\n if arraytype != 'numpy':\n raise ValueError('invalid array type')", + "docstring": "pygame.surfarray.use_arraytype(arraytype): return None DEPRECATED - only numpy arrays are now supported.", + "type": "function", + "file_path": "pygame\\src_py\\surfarray.py", + "ast_data": "FunctionDef name:use_arraytype arg:arraytype arguments arg Call Call Assign Call If Compare Raise Call" + }, + { + "library": "pytorch", + "name": "monitored_barrier", + "source_code": "def monitored_barrier(group: Optional[ProcessGroup]=GroupMember.WORLD, timeout=None, wait_all_ranks=False):\n if _rank_not_in_group(group):\n _warn_not_in_group('monitored_barrier')\n return\n if get_backend(group) != Backend.GLOO:\n raise ValueError('monitored_barrier is only implemented for GLOO backend.')\n if timeout is None:\n timeout = _get_default_timeout(get_backend(group))\n elif isinstance(timeout, float):\n warnings.warn(f'Please specify timeout arg as a timedelta. Converting current value of {timeout} assuming it represents seconds')\n timeout = timedelta(seconds=timeout)\n _check_valid_timeout(timeout)\n group_to_use = _get_default_group() if group is None else group\n return group_to_use.monitored_barrier(timeout, wait_all_ranks=wait_all_ranks)", + "docstring": "Synchronize processes similar to ``. Example:: >>> # xdoctest: +SKIP(\"need process group init\") >>> # Note: Process group initialization omitted on each rank. >>> import torch.distributed as dist >>> if dist.get_rank() != 1: >>> dist.monitored_barrier() # Raises exception indicating that >>> # rank 1 did not call into monitored_barrier. >>> # Example with wait_all_ranks=True >>> if dist.get_rank() == 0: >>> dist.monitored_barrier(wait_all_ranks=True) # Raises exception >>> # indicating that ranks 1, 2, ... 
world_size - 1 did not call into >>> # monitored_barrier.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:monitored_barrier arg:group arg:timeout arg:wait_all_ranks arguments arg arg arg If Call Call Return return:no If Compare Call Raise Call If Compare Assign Call Call If Call Call Assign Call Call Assign Compare Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_drop_level_numbers", + "source_code": "@final\ndef _drop_level_numbers(self, levnums: list[int]):\n if not levnums and (not isinstance(self, ABCMultiIndex)):\n return self\n if len(levnums) >= self.nlevels:\n raise ValueError(f'Cannot remove {len(levnums)} levels from an index with {self.nlevels} levels: at least one level must be left.')\n self = cast('MultiIndex', self)\n new_levels = list(self.levels)\n new_codes = list(self.codes)\n new_names = list(self.names)\n for i in levnums:\n new_levels.pop(i)\n new_codes.pop(i)\n new_names.pop(i)\n if len(new_levels) == 1:\n lev = new_levels[0]\n if len(lev) == 0:\n if len(new_codes[0]) == 0:\n result = lev[:0]\n else:\n res_values = algos.take(lev._values, new_codes[0], allow_fill=True)\n result = lev._constructor._simple_new(res_values, name=new_names[0])\n else:\n mask = new_codes[0] == -1\n result = new_levels[0].take(new_codes[0])\n if mask.any():\n result = result.putmask(mask, np.nan)\n result._name = new_names[0]\n return result\n else:\n from pandas.core.indexes.multi import MultiIndex\n return MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False)", + "docstring": "Drop MultiIndex levels by level _number_, not name.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_drop_level_numbers arg:self arg:levnums arguments arg arg If BoolOp Call Return return:yes If Compare Call Raise Call Call Assign Call Assign Call Assign Call Assign Call For Call Call Call If Compare Call Assign If Compare Call If Compare Call Assign Assign Call Assign Call Assign Compare Assign Call If Call Assign Call Assign Return return:yes Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_print_verbose_msg_iter_end", + "source_code": "def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n if n_iter % self.verbose_interval == 0:\n if self.verbose == 1:\n print(' Iteration %d' % n_iter)\n elif self.verbose >= 2:\n cur_time = time()\n print(' Iteration %d\\t time lapse %.5fs\\t ll change %.5f' % (n_iter, cur_time - self._iter_prev_time, diff_ll))\n self._iter_prev_time = cur_time", + "docstring": "Print verbose message on initialization.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\mixture\\_base.py", + "ast_data": "FunctionDef name:_print_verbose_msg_iter_end arg:self arg:n_iter arg:diff_ll arguments arg arg arg If Compare If Compare Call If Compare Assign Call Call Assign" + }, + { + "library": "tensorflow", + "name": "_create_per_worker_resources", + "source_code": "def _create_per_worker_resources(self, fn, args=None, kwargs=None):\n results = []\n for w in self._cluster.workers:\n results.append(w.create_resource(fn, args=args, kwargs=kwargs))\n return PerWorkerValues(tuple(results))", + "docstring": "Synchronously create resources on the workers. The resources are represented by s. Args: fn: The function to be dispatched to all workers for execution asynchronously. args: Positional arguments for . kwargs: Keyword arguments for . 
Returns: A object, which wraps a tuple of objects.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:_create_per_worker_resources arg:self arg:fn arg:args arg:kwargs arguments arg arg arg arg Assign For Call Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "as_int", + "source_code": "def as_int(self, is_64=False):\n if is_64:\n return capi.get_field_as_integer64(self._feat.ptr, self._index) if self.is_set else None\n else:\n return capi.get_field_as_integer(self._feat.ptr, self._index) if self.is_set else None", + "docstring": "Retrieve the Field's value as an integer.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\field.py", + "ast_data": "FunctionDef name:as_int arg:self arg:is_64 arguments arg arg If Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "y", + "source_code": "def y(self):\n return '%02d' % (self.data.year % 100)", + "docstring": "Year, 2 digits with leading zeros; e.g. '99'.", + "type": "method", + "file_path": "django\\django\\utils\\dateformat.py", + "ast_data": "FunctionDef name:y arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "indexing_dtype", + "source_code": "def indexing_dtype(xp):\n return xp.asarray(0).dtype", + "docstring": "Return a platform-specific integer dtype suitable for indexing. On 32-bit platforms, this will typically return int32 and int64 otherwise. Note: using dtype is recommended for indexing transient array datastructures. For long-lived arrays, such as the fitted attributes of estimators, it is instead recommended to use platform-independent int32 if we do not expect to index more 2B elements. Using fixed dtypes simplifies the handling of serialized models, e.g. to deploy a model fit on a 64-bit platform to a target 32-bit platform such as WASM/pyodide.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py", + "ast_data": "FunctionDef name:indexing_dtype arg:xp arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "set_image", + "source_code": "@torch.no_grad()\ndef set_image(self, image: Tensor, mean: Optional[Tensor]=None, std: Optional[Tensor]=None) -> None:\n KORNIA_CHECK_SHAPE(image, ['3', 'H', 'W'])\n self.reset_image()\n self._original_image_size = (image.shape[-2], image.shape[-1])\n image = self.transforms(image, data_keys=['input'])\n self._tfs_params = self.transforms._params\n self._input_image_size = (image.shape[-2], image.shape[-1])\n image = self.preprocess_image(image, mean, std)\n self._input_encoder_size = (image.shape[-2], image.shape[-1])\n self.image_embeddings = self.model.image_encoder(image)\n self.is_image_set = True", + "docstring": "Set the embeddings from the given image with of the model. Prepare the given image with the selected transforms and the preprocess method. Args: image: RGB image. Normally images with range of [0-1], the model preprocess normalize the pixel values with the mean and std defined in its initialization. Expected to be into a float32 dtype. Shape :math:. mean: mean value of dataset for normalization. 
std: standard deviation of dataset for normalization.", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\visual_prompter.py", + "ast_data": "FunctionDef name:set_image arg:self arg:image arg:mean arg:std arguments arg arg arg arg Call Call Assign Assign Call Assign Assign Assign Call Assign Assign Call Assign Call" + }, + { + "library": "scipy", + "name": "upfirdn", + "source_code": "def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0):\n xp = array_namespace(h, x)\n x = np.asarray(x)\n ufd = _UpFIRDn(h, x.dtype, up, down)\n return xp.asarray(ufd.apply_filter(x, axis, mode, cval))", + "docstring": "Upsample, FIR filter, and downsample. Parameters ---------- h : array_like 1-D FIR (finite-impulse response) filter coefficients. x : array_like Input signal array. up : int, optional Upsampling rate. Default is 1. down : int, optional Downsampling rate. Default is 1. axis : int, optional The axis of the input data array along which to apply the linear filter. The filter is applied to each subarray along this axis. Default is -1. mode : str, optional The signal extension mode to use. The set `numpy.pad\"line\"xaxishupdown`: >>> upfirdn(h, x, 2, axis=0) array([[ 0., 1.], [ 0., 1.], [ 2., 3.], [ 2., 3.], [ 4., 5.], [ 4., 5.], [ 6., 7.], [ 6., 7.]])", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_upfirdn.py", + "ast_data": "FunctionDef name:upfirdn arg:h arg:x arg:up arg:down arg:axis arg:mode arg:cval arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_dense_tensor", + "source_code": "def get_dense_tensor(self, transformation_cache, state_manager):\n _check_invalid_cases(self._embedding_lookup_device)\n is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU\n is_cpu = is_cpu or _is_running_on_cpu()\n if is_cpu:\n return super(_TPUDeviceSpecificEmbeddingColumnV2, self).get_dense_tensor(transformation_cache, state_manager)\n elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:\n return super(_TPUDeviceSpecificEmbeddingColumnV2, self).get_dense_tensor(transformation_cache, state_manager)\n if tpu.under_tpu_inference_context():\n sparse_tensor = transformation_cache.get(self.categorical_column.name, state_manager)\n\n def host_computation():\n return pad_sparse_embedding_lookup_indices(sparse_tensor, self._tensor_core_shape[1])\n values, mask = tpu_replication.outside_compilation(host_computation)\n else:\n values = transformation_cache.get(self.categorical_column.name, state_manager)\n mask = transformation_cache.get(self.categorical_column.name + _TENSOR_CORE_MASK_KEY_SUFFIX, state_manager)\n embedding_weights = state_manager.get_variable(self, name='embedding_weights')\n return sparse_embedding_aggregate_slice(embedding_weights, (values, mask), self.get_combiner())", + "docstring": "Private method that follows get_dense_tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column_v2.py", + "ast_data": "FunctionDef name:get_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Call Assign Compare Assign BoolOp Call If Return return:yes Call Call If Compare Return return:yes Call Call If Call Assign Call FunctionDef name:host_computation arguments Return return:yes Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "inner", + "source_code": 
"@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)\ndef inner(a, b):\n return (a, b)", + "docstring": "inner(a, b, /) Inner product of two arrays. Ordinary inner product of vectors for 1-D arrays (without complex conjugation), in higher dimensions a sum product over the last axes. Parameters ---------- a, b : array_like If and are nonscalar, their last dimensions must match. Returns ------- out : ndarray If and are both scalars or both 1-D arrays then a scalar is returned; otherwise an array is returned. `abbabb` is a scalar: >>> np.inner(np.eye(2), 7) array([[7., 0.], [0., 7.]])", + "type": "function", + "file_path": "numpy\\numpy\\_core\\multiarray.py", + "ast_data": "FunctionDef name:inner arg:a arg:b arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_symint_hints", + "source_code": "def _get_symint_hints(exprs):\n if isinstance(exprs, (list, tuple)):\n return type(exprs)((_get_symint_hints(e) for e in exprs))\n elif isinstance(exprs, torch.SymInt):\n return exprs.node.shape_env.size_hint(exprs.node.expr)\n else:\n return exprs", + "docstring": "Get the hints of a list/tuple of int/SymInt.", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\utils.py", + "ast_data": "FunctionDef name:_get_symint_hints arg:exprs arguments arg If Call Return return:yes Call Call Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "django", + "name": "georss_coords", + "source_code": "def georss_coords(self, coords):\n return ' '.join(('%f %f' % (coord[1], coord[0]) for coord in coords))", + "docstring": "In GeoRSS coordinate pairs are ordered by lat/lon and separated by a single white space. Given a tuple of coordinates, return a string GeoRSS representation.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\feeds.py", + "ast_data": "FunctionDef name:georss_coords arg:self arg:coords arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "idst", + "source_code": "@_dispatch\ndef idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, orthogonalize=None):\n return (Dispatchable(x, np.ndarray),)", + "docstring": "Return the Inverse Discrete Sine Transform of an arbitrary type sequence. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DST (see Notes). Default type is 2. n : int, optional Length of the transform. If `xx~scipy.fft.fftdstidstdst` for the full definitions). 'The' IDST is the IDST-II, which is the same as the normalized DST-III. The IDST is equivalent to a normal DST except for the normalization and type. 
DST type 1 and 4 are their own inverse and DSTs 2 and 3 are each other's inverses.", + "type": "function", + "file_path": "scipy\\scipy\\fft\\_realtransforms.py", + "ast_data": "FunctionDef name:idst arg:x arg:type arg:n arg:axis arg:norm arg:overwrite_x arg:workers arg:orthogonalize arguments arg arg arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_truncate_execution_to_epoch", + "source_code": "@contextlib.contextmanager\ndef _truncate_execution_to_epoch(self):\n should_truncate = self._inferred_steps is not None and self._steps_per_execution_value > self._inferred_steps\n original_value = self._steps_per_execution_value\n try:\n if should_truncate:\n self._steps_per_execution.assign(self._inferred_steps)\n self._steps_per_execution_value = self._inferred_steps\n yield\n finally:\n if should_truncate:\n self._steps_per_execution.assign(original_value)\n self._steps_per_execution_value = original_value", + "docstring": "Truncates steps per execution to at most one epoch.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", + "ast_data": "FunctionDef name:_truncate_execution_to_epoch arg:self arguments arg Assign BoolOp Compare Compare Assign Try If Call Assign If Call Assign" + }, + { + "library": "pandas", + "name": "sheet_names", + "source_code": "@property\ndef sheet_names(self):\n return self._reader.sheet_names", + "docstring": "Names of the sheets in the document. This is particularly useful for loading a specific sheet into a DataFrame when you do not know the sheet names beforehand. Returns ------- list of str List of sheet names in the document. See Also -------- ExcelFile.parse : Parse a sheet into a DataFrame. read_excel : Read an Excel file into a pandas DataFrame. If you know the sheet names, it may be easier to specify them directly to read_excel. Examples -------- >>> file = pd.ExcelFile(\"myfile.xlsx\") # doctest: +SKIP >>> file.sheet_names # doctest: +SKIP [\"Sheet1\", \"Sheet2\"]", + "type": "method", + "file_path": "pandas\\pandas\\io\\excel\\_base.py", + "ast_data": "FunctionDef name:sheet_names arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, kernel_name: str, runtime_arg_info: list['ArgInfo'], runtime_arg_values: list[Any]) -> None:\n super().__init__()\n self.kernel_name = kernel_name\n self.runtime_arg_info = runtime_arg_info\n self.runtime_arg_values = runtime_arg_values", + "docstring": "Initializes a new instance of the CUDATemplateKernel class. Args: kernel_name (str): The name of the kernel.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:kernel_name arg:runtime_arg_info arg:runtime_arg_values arguments arg arg arg arg Call Call Assign Assign Assign" + }, + { + "library": "scipy", + "name": "_as_zpk", + "source_code": "def _as_zpk(self):\n if isinstance(self, ZerosPolesGain):\n return self\n else:\n return self.to_zpk()", + "docstring": "Convert to system, without copying. Returns ------- sys: ZerosPolesGain The system. 
If the class is already an instance of then this instance is returned.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:_as_zpk arg:self arguments arg If Call Return return:yes Return return:yes Call" + }, + { + "library": "sphinx", + "name": "read_svg_depth", + "source_code": "def read_svg_depth(filename: str | os.PathLike[str]) -> int | None:\n with open(filename, encoding='utf-8') as f:\n for line in f:\n pass\n matched = depthsvgcomment_re.match(line)\n if matched:\n return int(matched.group(1))\n return None", + "docstring": "Read the depth from comment at last line of SVG file", + "type": "function", + "file_path": "sphinx\\sphinx\\ext\\imgmath.py", + "ast_data": "FunctionDef name:read_svg_depth arg:filename arguments arg With Call For Assign Call If Return return:yes Call Call Return return:no" + }, + { + "library": "matplotlib", + "name": "set_depthshade", + "source_code": "def set_depthshade(self, depthshade, depthshade_minalpha=None):\n if depthshade_minalpha is None:\n depthshade_minalpha = rcParams['axes3d.depthshade_minalpha']\n self._depthshade = depthshade\n self._depthshade_minalpha = depthshade_minalpha\n self.stale = True", + "docstring": "Set whether depth shading is performed on collection members. Parameters ---------- depthshade : bool Whether to shade the patches in order to give the appearance of depth. depthshade_minalpha : float Sets the minimum alpha value used by depth-shading. .. versionadded:: 3.11", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:set_depthshade arg:self arg:depthshade arg:depthshade_minalpha arguments arg arg arg If Compare Assign Assign Assign Assign" + }, + { + "library": "scipy", + "name": "_primitive_root", + "source_code": "def _primitive_root(p):\n pm = p - 1\n factors = _factorize_int(pm)\n n = len(factors)\n r = 2\n k = 0\n while k < n:\n d = pm // factors[k]\n rd = pow(int(r), int(d), int(p))\n if rd == 1:\n r += 1\n k = 0\n else:\n k += 1\n return r", + "docstring": "Compute a primitive root of the prime number . Used in the CBC lattice construction. References ---------- .. 
[1]", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_qmvnt.py", + "ast_data": "FunctionDef name:_primitive_root arg:p arguments arg Assign Assign Call Assign Call Assign Assign While Compare Assign Assign Call Call Call Call If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_scipy_sparse_to_sparse_tensor", + "source_code": "def _scipy_sparse_to_sparse_tensor(t):\n sparse_coo = t.tocoo()\n row, col = (sparse_coo.row, sparse_coo.col)\n data, shape = (sparse_coo.data, sparse_coo.shape)\n if issubclass(data.dtype.type, np.floating):\n data = data.astype(backend.floatx())\n indices = np.concatenate((np.expand_dims(row, axis=1), np.expand_dims(col, axis=1)), axis=1)\n return sparse_tensor.SparseTensor(indices, data, shape)", + "docstring": "Converts a SciPy sparse matrix to a SparseTensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", + "ast_data": "FunctionDef name:_scipy_sparse_to_sparse_tensor arg:t arguments arg Assign Call Assign Assign If Call Assign Call Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "IntegerChoices", + "source_code": "class IntegerChoices(Choices, IntEnum):\n pass", + "docstring": "Class for creating enumerated integer choices.", + "type": "class", + "file_path": "django\\django\\db\\models\\enums.py", + "ast_data": "ClassDef name:IntegerChoices" + }, + { + "library": "django", + "name": "names_digest", + "source_code": "def names_digest(*args, length):\n h = md5(usedforsecurity=False)\n for arg in args:\n h.update(arg.encode())\n return h.hexdigest()[:length]", + "docstring": "Generate a 32-bit digest of a set of arguments that can be used to shorten identifying names.", + "type": "function", + "file_path": "django\\django\\db\\backends\\utils.py", + "ast_data": "FunctionDef name:names_digest arguments arg arg Assign Call For Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "all_but", + "source_code": "@classmethod\ndef all_but(cls, exclude):\n if not isinstance(exclude, (list, tuple, set)):\n exclude = (exclude,)\n return tuple(set(cls.all()) - set(exclude) - {cls.ALL})", + "docstring": "Returns a tuple that enables all but the excluded options.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\converter.py", + "ast_data": "FunctionDef name:all_but arg:cls arg:exclude arguments arg arg If Call Assign Return return:yes Call Call Call Call" + }, + { + "library": "django", + "name": "set_language", + "source_code": "def set_language(request):\n next_url = request.POST.get('next', request.GET.get('next'))\n if (next_url or request.accepts('text/html')) and (not url_has_allowed_host_and_scheme(url=next_url, allowed_hosts={request.get_host()}, require_https=request.is_secure())):\n next_url = request.META.get('HTTP_REFERER')\n if not url_has_allowed_host_and_scheme(url=next_url, allowed_hosts={request.get_host()}, require_https=request.is_secure()):\n next_url = '/'\n response = HttpResponseRedirect(next_url) if next_url else HttpResponse(status=204)\n if request.method == 'POST':\n lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)\n if lang_code and check_for_language(lang_code):\n if next_url:\n next_trans = translate_url(next_url, lang_code)\n if next_trans != next_url:\n response = HttpResponseRedirect(next_trans)\n response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code, max_age=settings.LANGUAGE_COOKIE_AGE, path=settings.LANGUAGE_COOKIE_PATH, 
domain=settings.LANGUAGE_COOKIE_DOMAIN, secure=settings.LANGUAGE_COOKIE_SECURE, httponly=settings.LANGUAGE_COOKIE_HTTPONLY, samesite=settings.LANGUAGE_COOKIE_SAMESITE)\n return response", + "docstring": "Redirect to a given URL while setting the chosen language in the session (if enabled) and in a cookie. The URL and the language code need to be specified in the request parameters. Since this view changes how the user will see the rest of the site, it must only be accessed as a POST request. If called as a GET request, it will redirect to the page in the request (the 'next' parameter) without changing any state.", + "type": "function", + "file_path": "django\\django\\views\\i18n.py", + "ast_data": "FunctionDef name:set_language arg:request arguments arg Assign Call Call If BoolOp BoolOp Call Call Call Call Assign Call If Call Call Call Assign Assign Call Call If Compare Assign Call If BoolOp Call If Assign Call If Compare Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "module_class_name", + "source_code": "@property\ndef module_class_name(self) -> str:\n if self._module_class is None:\n return ''\n if isinstance(self._module_class, type):\n return self._module_class.__name__\n return self._module_class", + "docstring": "Name of the module class. E.g. .", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py", + "ast_data": "FunctionDef name:module_class_name arg:self arguments arg If Compare Return return:yes If Call Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "consensus_score", + "source_code": "@validate_params({'a': [tuple], 'b': [tuple], 'similarity': [callable, StrOptions({'jaccard'})]}, prefer_skip_nested_validation=True)\ndef consensus_score(a, b, *, similarity='jaccard'):\n if similarity == 'jaccard':\n similarity = _jaccard\n matrix = _pairwise_similarity(a, b, similarity)\n row_indices, col_indices = linear_sum_assignment(1.0 - matrix)\n n_a = len(a[0])\n n_b = len(b[0])\n return float(matrix[row_indices, col_indices].sum() / max(n_a, n_b))", + "docstring": "The similarity of two sets of biclusters. Similarity between individual biclusters is computed. Then the best matching between sets is found by solving a linear sum assignment problem, using a modified Jonker-Volgenant algorithm. The final score is the sum of similarities divided by the size of the larger set. Read more in the :ref:. Parameters ---------- a : tuple (rows, columns) Tuple of row and column indicators for a set of biclusters. b : tuple (rows, columns) Another set of biclusters like `FABIA: factor analysis for bicluster acquisition `__. 
Examples -------- >>> from sklearn.metrics import consensus_score >>> a = ([[True, False], [False, True]], [[False, True], [True, False]]) >>> b = ([[False, True], [True, False]], [[True, False], [False, True]]) >>> consensus_score(a, b, similarity='jaccard') 1.0", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_bicluster.py", + "ast_data": "FunctionDef name:consensus_score arg:a arg:b arguments arg arg arg If Compare Assign Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "profile", + "source_code": "@contextlib.contextmanager\ndef profile(mode: str='interval', wait_until_completed: bool=False):\n try:\n start(mode, wait_until_completed)\n yield\n finally:\n stop()", + "docstring": "Context Manager to enabling generating OS Signpost tracing from MPS backend. Args: mode(str): OS Signpost tracing mode could be \"interval\", \"event\", or both \"interval,event\". The interval mode traces the duration of execution of the operations, whereas event mode marks the completion of executions. See document _ for more info. wait_until_completed(bool): Waits until the MPS Stream complete executing each encoded GPU operation. This helps generating single dispatches on the trace's timeline. Note that enabling this option would affect the performance negatively. .. _Recording Performance Data:", + "type": "function", + "file_path": "pytorch\\torch\\mps\\profiler.py", + "ast_data": "FunctionDef name:profile arg:mode arg:wait_until_completed arguments arg arg Try Call Call" + }, + { + "library": "django", + "name": "__xor__", + "source_code": "def __xor__(self, other):\n return self.sym_difference(other)", + "docstring": "Return the symmetric difference of this Geometry and the other.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:__xor__ arg:self arg:other arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "single_method_decorator", + "source_code": "def single_method_decorator(f):\n\n @parameterized.named_parameters(*params)\n @functools.wraps(f)\n def decorated(self, model_type, *args, **kwargs):\n if model_type == 'functional':\n _test_functional_model_type(f, self, *args, **kwargs)\n elif model_type == 'subclass':\n _test_subclass_model_type(f, self, *args, **kwargs)\n elif model_type == 'sequential':\n _test_sequential_model_type(f, self, *args, **kwargs)\n else:\n raise ValueError('Unknown model type: %s' % (model_type,))\n return decorated", + "docstring": "Decorator that constructs the test cases.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\keras_parameterized.py", + "ast_data": "FunctionDef name:single_method_decorator arg:f arguments arg FunctionDef name:decorated arg:self arg:model_type arguments arg arg arg arg If Compare Call If Compare Call If Compare Call Raise Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "split_domain_port", + "source_code": "def split_domain_port(host):\n if (match := host_validation_re.fullmatch(host.lower())):\n domain, port = match.groups(default='')\n return (domain.removesuffix('.'), port)\n return ('', '')", + "docstring": "Return a (domain, port) tuple from a given host. Returned domain is lowercased. 
If the host is invalid, the domain will be empty.", + "type": "function", + "file_path": "django\\django\\http\\request.py", + "ast_data": "FunctionDef name:split_domain_port arg:host arguments arg If Call Call Assign Call Return return:yes Call Return return:yes" + }, + { + "library": "scipy", + "name": "combine_dict", + "source_code": "def combine_dict(*dicts, **kw):\n new_dict = {}\n for d in (kw,) + dicts:\n for key, value in d.items():\n if new_dict.get(key, None) is not None:\n old_value = new_dict[key]\n if isinstance(value, list | tuple):\n if isinstance(old_value, list | tuple):\n new_dict[key] = list(old_value) + list(value)\n continue\n elif value == old_value:\n continue\n raise ValueError(f'Conflicting configuration dicts: {new_dict!r} {d!r}')\n else:\n new_dict[key] = value\n return new_dict", + "docstring": "Combine Numpy distutils style library configuration dictionaries. Parameters ---------- *dicts Dictionaries of keys. List-valued keys will be concatenated. Otherwise, duplicate keys with different values result to an error. The input arguments are not modified. **kw Keyword arguments are treated as an additional dictionary (the first one, i.e., prepended). Returns ------- combined Dictionary with combined values.", + "type": "function", + "file_path": "scipy\\scipy\\_build_utils\\system_info.py", + "ast_data": "FunctionDef name:combine_dict arguments arg arg Assign For For Call If Compare Call Assign If Call If Call Assign Call Call If Compare Raise Call Assign Return return:yes" + }, + { + "library": "scrapy", + "name": "list", + "source_code": "def list(self) -> list[str]:\n pass", + "docstring": "Return a list with the names of all spiders available in the project", + "type": "method", + "file_path": "scrapy\\scrapy\\spiderloader.py", + "ast_data": "FunctionDef name:list arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "masked_fill_inference_rule", + "source_code": "@register_inference_rule('masked_fill_')\ndef masked_fill_inference_rule(n: Node, symbols, constraints, counter):\n assert isinstance(n.args[0], Node)\n assert isinstance(n.args[1], Node)\n e1 = symbols[n.args[0]]\n e2 = symbols[n.args[1]]\n if isinstance(e1, TVar) and isinstance(e2, TVar):\n masked_fill_tensor, counter = gen_tvar(counter)\n symbols[n] = masked_fill_tensor\n return gen_broadcasting_constraints(e1, e2, symbols, counter, masked_fill_tensor)\n else:\n raise NotImplementedError('Not yet implemented')", + "docstring": "Similar to addition. For now we implement the constraints when the argument is a boolean tensor. There is also a case for when it is a condition. 
We will leave this out for now.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py", + "ast_data": "FunctionDef name:masked_fill_inference_rule arg:n arg:symbols arg:constraints arg:counter arguments arg arg arg arg Call Call Assign Assign If BoolOp Call Call Assign Call Assign Return return:yes Call Raise Call Call" + }, + { + "library": "pandas", + "name": "NoNewAttributesMixin", + "source_code": "class NoNewAttributesMixin:\n\n def _freeze(self) -> None:\n object.__setattr__(self, '__frozen', True)\n\n def __setattr__(self, key: str, value) -> None:\n if getattr(self, '__frozen', False) and (not (key == '_cache' or key in type(self).__dict__ or getattr(self, key, None) is not None)):\n raise AttributeError(f\"You cannot add any new attribute '{key}'\")\n object.__setattr__(self, key, value)", + "docstring": "Mixin which prevents adding new attributes. Prevents additional attributes via xxx.attribute = \"something\" after a call to . Mainly used to prevent the user from using wrong attributes on an accessor (). If you really want to add a new attribute at a later time, you need to use .", + "type": "class", + "file_path": "pandas\\pandas\\core\\base.py", + "ast_data": "ClassDef name:NoNewAttributesMixin FunctionDef name:_freeze arg:self arguments arg Call FunctionDef name:__setattr__ arg:self arg:key arg:value arguments arg arg arg If BoolOp Call BoolOp Compare Compare Call Compare Call Raise Call Call" + }, + { + "library": "numpy", + "name": "chebpow", + "source_code": "def chebpow(c, pow, maxpower=16):\n [c] = pu.as_series([c])\n power = int(pow)\n if power != pow or power < 0:\n raise ValueError('Power must be a non-negative integer.')\n elif maxpower is not None and power > maxpower:\n raise ValueError('Power is too large')\n elif power == 0:\n return np.array([1], dtype=c.dtype)\n elif power == 1:\n return c\n else:\n zs = _cseries_to_zseries(c)\n prd = zs\n for i in range(2, power + 1):\n prd = np.convolve(prd, zs)\n return _zseries_to_cseries(prd)", + "docstring": "Raise a Chebyshev series to a power. Returns the Chebyshev series raised to the power . The argument is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series `` Parameters ---------- c : array_like 1-D array of Chebyshev series coefficients ordered from low to high. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Chebyshev series of power. See Also -------- chebadd, chebsub, chebmulx, chebmul, chebdiv Examples -------- >>> from numpy.polynomial import chebyshev as C >>> C.chebpow([1, 2, 3, 4], 2) array([15.5, 22. , 16. , ..., 12.5, 12. , 8. ])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\chebyshev.py", + "ast_data": "FunctionDef name:chebpow arg:c arg:pow arg:maxpower arguments arg arg arg Assign Call Assign Call If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call If Compare Return return:yes Call If Compare Return return:yes Assign Call Assign For Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_destroy", + "source_code": "def _destroy(qualname):\n custom_op = _find_custom_op(qualname)\n custom_op._destroy()", + "docstring": "De-registers a custom op. 
For testing purposes only", + "type": "function", + "file_path": "pytorch\\torch\\_custom_ops.py", + "ast_data": "FunctionDef name:_destroy arg:qualname arguments arg Assign Call Call" + }, + { + "library": "tensorflow", + "name": "embedding_layouts", + "source_code": "@property\ndef embedding_layouts(self) -> Dict[str, sparse_core_layout_pb2.SparseCoreTableLayout]:\n return self._s.table_to_layout", + "docstring": "Returns how the tables are laid out in the variables. The SparseCoreTableLayout describes how a table is stored in its internal state. You need this only if you need to pull apart the internal state.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", + "ast_data": "FunctionDef name:embedding_layouts arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "CompositeTensor", + "source_code": "@tf_export('__internal__.CompositeTensor', v1=[])\nclass CompositeTensor(metaclass=abc.ABCMeta):\n\n @abc.abstractproperty\n def _type_spec(self):\n raise NotImplementedError(f'{type(self).__name__}._type_spec()')\n\n def _shape_invariant_to_type_spec(self, shape):\n raise NotImplementedError(f'{type(self).__name__}._shape_invariant_to_type_spec')\n\n def _consumers(self):\n consumers = nest.flatten([component.consumers() for component in nest.flatten(self, expand_composites=True) if getattr(component, 'graph', None) is not None])\n return list(set(consumers))\n\n def __tf_tracing_type__(self, context):\n return self._type_spec.__tf_tracing_type__(context)\n\n def _convert_variables_to_tensors(self):\n return self", + "docstring": "Abstract base class for Tensor-like objects that are composed from Tensors. Each can be decomposed into a structured collection of component s, and reconstructed from those components. The module has support for treating composite tensors as structure, which makes it easy to flatten and reconstruct composite tensors (or larger structures that contain composite tensors). 
E.g.:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor.py", + "ast_data": "ClassDef name:CompositeTensor FunctionDef name:_type_spec arg:self arguments arg Raise Call Call FunctionDef name:_shape_invariant_to_type_spec arg:self arg:shape arguments arg arg Raise Call Call FunctionDef name:_consumers arg:self arguments arg Assign Call Call Call Compare Call Return return:yes Call Call FunctionDef name:__tf_tracing_type__ arg:self arg:context arguments arg arg Return return:yes Call FunctionDef name:_convert_variables_to_tensors arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_resource_safe_shape", + "source_code": "def _resource_safe_shape(t):\n if t.dtype == dtypes.resource:\n while t.op.inputs:\n t = t.op.inputs[0]\n return tensor_shape.TensorShape(t.op.get_attr('shape'))\n return array_ops.shape_internal(t, optimize=False)", + "docstring": "Returns the shape of t or the variable it points to.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:_resource_safe_shape arg:t arguments arg If Compare While Assign Return return:yes Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "detrend_linear", + "source_code": "def detrend_linear(y):\n y = np.asarray(y)\n if y.ndim > 1:\n raise ValueError('y cannot have ndim > 1')\n if not y.ndim:\n return np.array(0.0, dtype=y.dtype)\n x = np.arange(y.size, dtype=float)\n C = np.cov(x, y, bias=1)\n b = C[0, 1] / C[0, 0]\n a = y.mean() - b * x.mean()\n return y - (b * x + a)", + "docstring": "Return *x* minus best fit line; 'linear' detrending. Parameters ---------- y : 0-D or 1-D array or sequence Array or sequence containing the data See Also -------- detrend_mean : Another detrend algorithm. detrend_none : Another detrend algorithm. detrend : A wrapper around all the detrend algorithms.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\mlab.py", + "ast_data": "FunctionDef name:detrend_linear arg:y arguments arg Assign Call If Compare Raise Call If Return return:yes Call Assign Call Assign Call Assign Assign Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "signature_from_ast", + "source_code": "def signature_from_ast(node: ast.FunctionDef, code: str='') -> Signature:\n EMPTY = Parameter.empty\n args: ast.arguments = node.args\n defaults: tuple[ast.expr | None, ...] 
= tuple(args.defaults)\n pos_only_offset = len(args.posonlyargs)\n defaults_offset = pos_only_offset + len(args.args) - len(defaults)\n defaults = (None,) * defaults_offset + defaults\n params: list[Parameter] = []\n for arg, defexpr in zip(args.posonlyargs, defaults, strict=False):\n params.append(_define(Parameter.POSITIONAL_ONLY, arg, code, defexpr=defexpr))\n for arg, defexpr in zip(args.args, defaults[pos_only_offset:], strict=False):\n params.append(_define(Parameter.POSITIONAL_OR_KEYWORD, arg, code, defexpr=defexpr))\n if args.vararg:\n params.append(_define(Parameter.VAR_POSITIONAL, args.vararg, code, defexpr=None))\n for arg, defexpr in zip(args.kwonlyargs, args.kw_defaults, strict=False):\n params.append(_define(Parameter.KEYWORD_ONLY, arg, code, defexpr=defexpr))\n if args.kwarg:\n params.append(_define(Parameter.VAR_KEYWORD, args.kwarg, code, defexpr=None))\n return_annotation = ast_unparse(node.returns, code) or EMPTY\n return Signature(params, return_annotation=return_annotation)", + "docstring": "Create a :class: object from an AST node.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\inspect.py", + "ast_data": "FunctionDef name:signature_from_ast arg:node arg:code arguments arg arg Assign Call Assign Call Assign Call Call Assign For Call Call Call For Call Call Call If Call Call For Call Call Call If Call Call Assign BoolOp Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_fqn_to_example_inputs", + "source_code": "def get_fqn_to_example_inputs(model: torch.nn.Module, example_inputs: tuple[Any, ...]) -> dict[str, tuple[Any, ...]]:\n root = model\n fqn_to_example_inputs = {}\n\n def _patched_module_call(self, *args, **kwargs):\n submodule_example_inputs = list(args).copy()\n normalized_kwargs = _normalize_kwargs(self.forward, kwargs)\n num_args = _get_num_pos_args(self.forward) - 1\n num_to_pop = num_args - len(submodule_example_inputs)\n while num_to_pop and normalized_kwargs:\n normalized_kwargs.popitem(last=False)\n num_to_pop -= 1\n submodule_example_inputs.extend(normalized_kwargs.values())\n submodule_example_inputs_tuple = tuple(submodule_example_inputs)\n fqn = _get_path_of_module(root, self)\n if fqn is not None:\n fqn_to_example_inputs[fqn] = submodule_example_inputs_tuple\n return orig_module_call(self, *args, **kwargs)\n orig_module_call = torch.nn.Module.__call__\n torch.nn.Module.__call__ = _patched_module_call\n try:\n model(*example_inputs)\n finally:\n torch.nn.Module.__call__ = orig_module_call\n return fqn_to_example_inputs", + "docstring": "Given a model and its example inputs, return a dictionary from fully qualified name of submodules to example_inputs for that submodule, e.g. {\"linear1\": (tensor1,), \"linear2\": (tensor2,), \"sub\": (tensor3,), \"sub.linear1\": (tensor4,), ...} Used to make quantizing submodules easier now that FX Graph Mode Quantization requires example inputs. Also works for keyword arguments with default values, we would flatten keyword arguments as positional arguments and fill in the missing keyword args with default values, e.g. if we have a forward function: def forward(self, x, key1=3, key2=3): ... 
and we call it with self.submodule(x, key2=6) we'll get example_inputs: (x, 3, 6) user can also override with positional arguments as well: for self.submodule(x, 5, key2=6) we'll get: (x, 5, 6) variable positional arguments and variable positional keyword arguments in forward function are not supported currently, so please make sure no submodules is using them.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\utils.py", + "ast_data": "FunctionDef name:get_fqn_to_example_inputs arg:model arg:example_inputs arguments arg arg Assign Assign FunctionDef name:_patched_module_call arg:self arguments arg arg arg Assign Call Call Assign Call Assign Call Assign Call While BoolOp Call Call Call Assign Call Assign Call If Compare Assign Return return:yes Call Assign Assign Try Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_ascending_sort", + "source_code": "def _ascending_sort(values, axis, return_argsort=False):\n dtype = values.dtype\n if dtype.is_unsigned:\n offset = dtype.max\n values_or_indices = _descending_sort(offset - values, axis, return_argsort)\n return values_or_indices if return_argsort else offset - values_or_indices\n elif dtype.is_integer:\n values_or_indices = _descending_sort(-values - 1, axis, return_argsort)\n return values_or_indices if return_argsort else -values_or_indices - 1\n else:\n values_or_indices = _descending_sort(-values, axis, return_argsort)\n return values_or_indices if return_argsort else -values_or_indices", + "docstring": "Sorts values in ascending order. Args: values: Tensor of numeric values. axis: Index of the axis which values should be sorted along. return_argsort: If False, return the sorted values. If True, return the indices that would sort the values. Returns: The sorted values.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sort_ops.py", + "ast_data": "FunctionDef name:_ascending_sort arg:values arg:axis arg:return_argsort arguments arg arg arg Assign If Assign Assign Call Return return:yes If Assign Call Return return:yes Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "add", + "source_code": "def add(self, op1, op2, operator_name, hints=None):\n updated_hints = _infer_hints_allowing_override(op1, op2, hints)\n if operator_name is None:\n operator_name = 'Add/' + op1.name + '__' + op2.name + '/'\n scope_name = self.name\n if scope_name.startswith('_'):\n scope_name = scope_name[1:]\n with ops.name_scope(scope_name):\n return self._add(op1, op2, operator_name, updated_hints)", + "docstring": "Return new acting like . Args: op1: op2: , with and such that adding to is allowed. operator_name: name to give to returned hints: object. Returned will be created with these hints. Returns:", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_addition.py", + "ast_data": "FunctionDef name:add arg:self arg:op1 arg:op2 arg:operator_name arg:hints arguments arg arg arg arg arg Assign Call If Compare Assign Assign If Call Assign With Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "get_box_kernel2d", + "source_code": "def get_box_kernel2d(kernel_size: tuple[int, int] | int, *, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n ky, kx = _unpack_2d_ks(kernel_size)\n scale = tensor(1.0 / (kx * ky), device=device, dtype=dtype)\n return scale.expand(1, ky, kx)", + "docstring": "Return a 2-D box filter. Args: kernel_size: the size of the kernel. 
device: the desired device of returned tensor. dtype: the desired data type of returned tensor. Returns: A tensor with shape :math:, filled with the value :math:.", + "type": "function", + "file_path": "kornia\\kornia\\filters\\kernels.py", + "ast_data": "FunctionDef name:get_box_kernel2d arg:kernel_size arguments arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "put_variables", + "source_code": "def put_variables(self, mdict, write_header=None):\n self._matrix_writer = VarWriter4(self)\n for name, var in mdict.items():\n self._matrix_writer.write(var, name)", + "docstring": "Write variables in to stream Parameters ---------- mdict : mapping mapping with method `` is something writeable to a matlab file, such as a NumPy array. write_header : {None, True, False} If True, then write the matlab file header before writing the variables. If None (the default) then write the file header if we are at position 0 in the stream. By setting False here, and setting the stream position to the end of the file, you can append variables to a matlab file", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py", + "ast_data": "FunctionDef name:put_variables arg:self arg:mdict arg:write_header arguments arg arg arg Assign Call For Call Call" + }, + { + "library": "pandas", + "name": "_equal_values", + "source_code": "def _equal_values(self, other: Self) -> bool:\n if other.ndim != 1:\n return False\n left = self.blocks[0].values\n right = other.blocks[0].values\n return array_equals(left, right)", + "docstring": "Used in .equals defined in base class. Only check the column values assuming shape and indexes have already been checked.", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:_equal_values arg:self arg:other arguments arg arg If Compare Return return:yes Assign Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "cancel", + "source_code": "def cancel(self) -> None:\n if self._finalizer:\n self._finalizer()", + "docstring": "Stop the timer at the next opportunity.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\utils.py", + "ast_data": "FunctionDef name:cancel arg:self arguments arg If Call" + }, + { + "library": "tensorflow", + "name": "_HasAnyNotNoneGrads", + "source_code": "def _HasAnyNotNoneGrads(grads, op: ops.Operation):\n out_grads = _GetGrads(grads, op)\n for out_grad in out_grads:\n if isinstance(out_grad, (tensor_lib.Tensor, indexed_slices.IndexedSlices)):\n return True\n if out_grad and isinstance(out_grad, collections_abc.Sequence):\n if any((g is not None for g in out_grad)):\n return True\n return False", + "docstring": "Return true iff op has real gradient.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py", + "ast_data": "FunctionDef name:_HasAnyNotNoneGrads arg:grads arg:op arguments arg arg Assign Call For If Call Return return:yes If BoolOp Call If Call Compare Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "include_empty_choice", + "source_code": "@property\ndef include_empty_choice(self):\n return self.field.null or (self.field.is_relation and self.field.many_to_many)", + "docstring": "Return True if a \"(None)\" choice should be included, which filters out everything except empty relationships.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\filters.py", + "ast_data": "FunctionDef 
name:include_empty_choice arg:self arguments arg Return return:yes BoolOp BoolOp" + }, + { + "library": "kornia", + "name": "denormalize_homography", + "source_code": "def denormalize_homography(dst_pix_trans_src_pix: Tensor, dsize_src: tuple[int, int], dsize_dst: tuple[int, int]) -> Tensor:\n if not isinstance(dst_pix_trans_src_pix, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(dst_pix_trans_src_pix)}')\n if not (len(dst_pix_trans_src_pix.shape) == 3 or dst_pix_trans_src_pix.shape[-2:] == (3, 3)):\n raise ValueError(f'Input dst_pix_trans_src_pix must be a Bx3x3 tensor. Got {dst_pix_trans_src_pix.shape}')\n src_h, src_w = dsize_src\n dst_h, dst_w = dsize_dst\n src_norm_trans_src_pix: Tensor = normal_transform_pixel(src_h, src_w).to(dst_pix_trans_src_pix)\n dst_norm_trans_dst_pix: Tensor = normal_transform_pixel(dst_h, dst_w).to(dst_pix_trans_src_pix)\n dst_denorm_trans_dst_pix = _torch_inverse_cast(dst_norm_trans_dst_pix)\n dst_norm_trans_src_norm: Tensor = dst_denorm_trans_dst_pix @ (dst_pix_trans_src_pix @ src_norm_trans_src_pix)\n return dst_norm_trans_src_norm", + "docstring": "De-normalize a given homography in pixels from [-1, 1] to actual height and width. Args: dst_pix_trans_src_pix: homography/ies from source to destination to be denormalized. :math: dsize_src: size of the source image (height, width). dsize_dst: size of the destination image (height, width). Returns: the denormalized homography of shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:denormalize_homography arg:dst_pix_trans_src_pix arg:dsize_src arg:dsize_dst arguments arg arg arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Assign Assign Call Call Call Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "login", + "source_code": "def login(request, user, backend=None):\n session_auth_hash = ''\n if user is None:\n user = request.user\n warnings.warn('Fallback to request.user when user is None will be removed.', RemovedInDjango61Warning, stacklevel=2)\n if hasattr(user, 'get_session_auth_hash'):\n session_auth_hash = user.get_session_auth_hash()\n if SESSION_KEY in request.session:\n if _get_user_session_key(request) != user.pk or (session_auth_hash and (not constant_time_compare(request.session.get(HASH_SESSION_KEY, ''), session_auth_hash))):\n request.session.flush()\n else:\n request.session.cycle_key()\n backend = _get_backend_from_user(user=user, backend=backend)\n request.session[SESSION_KEY] = user._meta.pk.value_to_string(user)\n request.session[BACKEND_SESSION_KEY] = backend\n request.session[HASH_SESSION_KEY] = session_auth_hash\n if hasattr(request, 'user'):\n request.user = user\n rotate_token(request)\n user_logged_in.send(sender=user.__class__, request=request, user=user)", + "docstring": "Persist a user id and a backend in the request. This way a user doesn't have to reauthenticate on every request. 
Note that data set during the anonymous session is retained when the user logs in.", + "type": "function", + "file_path": "django\\django\\contrib\\auth\\__init__.py", + "ast_data": "FunctionDef name:login arg:request arg:user arg:backend arguments arg arg arg Assign If Compare Assign Call If Call Assign Call If Compare If BoolOp Compare Call BoolOp Call Call Call Call Assign Call Assign Call Assign Assign If Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "assert_like_rnncell", + "source_code": "def assert_like_rnncell(cell_name, cell):\n conditions = [_hasattr(cell, 'output_size'), _hasattr(cell, 'state_size'), _hasattr(cell, 'get_initial_state') or _hasattr(cell, 'zero_state'), callable(cell)]\n errors = [\"'output_size' property is missing\", \"'state_size' property is missing\", \"either 'zero_state' or 'get_initial_state' method is required\", 'is not callable']\n if not all(conditions):\n errors = [error for error, cond in zip(errors, conditions) if not cond]\n raise TypeError('The argument {!r} ({}) is not an RNNCell: {}.'.format(cell_name, cell, ', '.join(errors)))", + "docstring": "Raises a TypeError if cell is not like an RNNCell. NOTE: Do not rely on the error message (in particular in tests) which can be subject to change to increase readability. Use ASSERT_LIKE_RNNCELL_ERROR_REGEXP. Args: cell_name: A string to give a meaningful error referencing to the name of the functionargument. cell: The object which should behave like an RNNCell. Raises: TypeError: A human-friendly exception.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py", + "ast_data": "FunctionDef name:assert_like_rnncell arg:cell_name arg:cell arguments arg arg Assign Call Call BoolOp Call Call Call Assign If Call Assign Call Raise Call Call Call" + }, + { + "library": "scipy", + "name": "__str__", + "source_code": "def __str__(self):\n messages = [str(param) for name, param in self.parameters.items()]\n return ', '.join(messages)", + "docstring": "Returns a string representation of the parameterization.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py", + "ast_data": "FunctionDef name:__str__ arg:self arguments arg Assign Call Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "getdoc", + "source_code": "def getdoc(self, np_ret, np_ma_ret):\n doc = getattr(self._func, '__doc__', None)\n sig = get_object_signature(self._func)\n if doc:\n doc = self._replace_return_type(doc, np_ret, np_ma_ret)\n if sig:\n sig = f'{self._func.__name__}{sig}\\n'\n doc = sig + doc\n return doc", + "docstring": "Return the doc of the function (from the doc of the method).", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:getdoc arg:self arg:np_ret arg:np_ma_ret arguments arg arg arg Assign Call Assign Call If Assign Call If Assign Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_get_estimator", + "source_code": "def _get_estimator(self):\n if self.estimator is None:\n estimator = LinearSVC(random_state=0)\n if _routing_enabled():\n estimator.set_fit_request(sample_weight=True)\n else:\n estimator = self.estimator\n return estimator", + "docstring": "Resolve which estimator to return (default is LinearSVC)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\calibration.py", + "ast_data": "FunctionDef name:_get_estimator arg:self arguments arg If Compare Assign Call If Call Call Assign Return return:yes" + }, 
+ { + "library": "pytorch", + "name": "choose_qparams_tensor", + "source_code": "@impl(quantized_decomposed_lib, 'choose_qparams.tensor', 'CompositeExplicitAutograd')\ndef choose_qparams_tensor(input: torch.Tensor, qmin: int, qmax: int, eps: float, dtype: torch.dtype) -> tuple[torch.Tensor, torch.Tensor]:\n assert input.dtype in [torch.float32, torch.float16, torch.bfloat16], f'Expecting input to have dtype torch.float32/16/b16, but got dtype: {input.dtype}'\n assert dtype in _DTYPE_TO_QVALUE_BOUNDS, f'Expecting target dtype to be one of {_DTYPE_TO_QVALUE_BOUNDS.keys()}, but got: {dtype}'\n validate_qmin_qmax(qmin, qmax)\n min_val, max_val = torch.aminmax(input)\n return determine_qparams(min_val, max_val, qmin, qmax, dtype, torch.Tensor([eps]), has_customized_qrange=False)", + "docstring": "Given an input Tensor, derive the per tensor affine quantization parameter (scale and zero_point) for target quantized Tensor from the Tensor Args: input (torch.Tensor): floating point input Tensor quant_min (int): minimum quantized value for target quantized Tensor quant_max (int): maximum quantized value for target quantized Tensor dtype (torch.dtype): dtype for target quantized Tensor Returns: scale (float): quantization parameter for the target quantized Tensor zero_point (int): quantization parameter for the target quantized Tensor", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py", + "ast_data": "FunctionDef name:choose_qparams_tensor arg:input arg:qmin arg:qmax arg:eps arg:dtype arguments arg arg arg arg arg Compare Compare Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_make_one_shot_iterator", + "source_code": "def _make_one_shot_iterator(self):\n if not context.executing_eagerly():\n raise ValueError('Cannot create a one shot iterator. Please use `make_initializable_iterator()` instead.')\n return self._get_iterator()", + "docstring": "Get an iterator for iterating over DistributedDatasetsFromFunctionV1.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py", + "ast_data": "FunctionDef name:_make_one_shot_iterator arg:self arguments arg If Call Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "register_tab_comp_context", + "source_code": "def register_tab_comp_context(self, context_words, comp_items):\n if not isinstance(context_words, list):\n raise TypeError('Incorrect type in context_list: Expected list, got %s' % type(context_words))\n if not isinstance(comp_items, list):\n raise TypeError('Incorrect type in comp_items: Expected list, got %s' % type(comp_items))\n sorted_comp_items = sorted(comp_items)\n for context_word in context_words:\n self._comp_dict[context_word] = sorted_comp_items", + "docstring": "Register a tab-completion context. Register that, for each word in context_words, the potential tab-completions are the words in comp_items. A context word is a pre-existing, completed word in the command line that determines how tab-completion works for another, incomplete word in the same command line. Completion items consist of potential candidates for the incomplete word. To give a general example, a context word can be \"drink\", and the completion items can be [\"coffee\", \"tea\", \"water\"] Note: A context word can be empty, in which case the context is for the top-level commands. Args: context_words: A list of context words belonging to the context being registered. 
It is a list of str, instead of a single string, to support synonym words triggering the same tab-completion context, e.g., both \"drink\" and the short-hand \"dr\" can trigger the same context. comp_items: A list of completion items, as a list of str. Raises: TypeError: if the input arguments are not all of the correct types.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", + "ast_data": "FunctionDef name:register_tab_comp_context arg:self arg:context_words arg:comp_items arguments arg arg arg If Call Raise Call Call If Call Raise Call Call Assign Call For Assign" + }, + { + "library": "sphinx", + "name": "after_content", + "source_code": "def after_content(self) -> None:\n objects = self.env.ref_context.setdefault('js:objects', [])\n if self.allow_nesting:\n with contextlib.suppress(IndexError):\n objects.pop()\n self.env.ref_context['js:object'] = objects[-1] if len(objects) > 0 else None", + "docstring": "Handle object de-nesting after content If this class is a nestable object, removing the last nested class prefix ends further nesting in the object. If this class is not a nestable object, the list of classes should not be altered as we didn't affect the nesting levels in :py:meth:.", + "type": "method", + "file_path": "sphinx\\sphinx\\domains\\javascript.py", + "ast_data": "FunctionDef name:after_content arg:self arguments arg Assign Call If With Call Call Assign Compare Call" + }, + { + "library": "authlib", + "name": "validate_request_object_signing_alg", + "source_code": "def validate_request_object_signing_alg(self):\n self._validate_claim_value('request_object_signing_alg')", + "docstring": "JWS [JWS] alg algorithm [JWA] that MUST be used for signing Request Objects sent to the OP. All Request Objects from this Client MUST be rejected, if not signed with this algorithm. Request Objects are described in Section 6.1 of OpenID Connect Core 1.0 [OpenID.Core]. This algorithm MUST be used both when the Request Object is passed by value (using the request parameter) and when it is passed by reference (using the request_uri parameter). Servers SHOULD support RS256. The value none MAY be used. 
The default, if omitted, is that any algorithm supported by the OP and the RP MAY be used.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\registration\\claims.py", + "ast_data": "FunctionDef name:validate_request_object_signing_alg arg:self arguments arg Call" + }, + { + "library": "django", + "name": "check_storages", + "source_code": "def check_storages(app_configs=None, **kwargs):\n errors = []\n if STATICFILES_STORAGE_ALIAS not in settings.STORAGES:\n errors.append(E005)\n return errors", + "docstring": "Ensure staticfiles is defined in STORAGES setting.", + "type": "function", + "file_path": "django\\django\\contrib\\staticfiles\\checks.py", + "ast_data": "FunctionDef name:check_storages arg:app_configs arguments arg arg Assign If Compare Call Return return:yes" + }, + { + "library": "pandas", + "name": "to_html", + "source_code": "@Substitution(buf=buffering_args, encoding=encoding_args)\ndef to_html(self, buf: FilePath | WriteBuffer[str] | None=None, *, table_uuid: str | None=None, table_attributes: str | None=None, sparse_index: bool | None=None, sparse_columns: bool | None=None, bold_headers: bool=False, caption: str | None=None, max_rows: int | None=None, max_columns: int | None=None, encoding: str | None=None, doctype_html: bool=False, exclude_styles: bool=False, **kwargs) -> str | None:\n obj = self._copy(deepcopy=True)\n if table_uuid:\n obj.set_uuid(table_uuid)\n if table_attributes:\n obj.set_table_attributes(table_attributes)\n if sparse_index is None:\n sparse_index = get_option('styler.sparse.index')\n if sparse_columns is None:\n sparse_columns = get_option('styler.sparse.columns')\n if bold_headers:\n obj.set_table_styles([{'selector': 'th', 'props': 'font-weight: bold;'}], overwrite=False)\n if caption is not None:\n obj.set_caption(caption)\n html = obj._render_html(sparse_index=sparse_index, sparse_columns=sparse_columns, max_rows=max_rows, max_cols=max_columns, exclude_styles=exclude_styles, encoding=encoding or get_option('styler.render.encoding'), doctype_html=doctype_html, **kwargs)\n return save_to_buffer(html, buf=buf, encoding=encoding if buf is not None else None)", + "docstring": "Write Styler to a file, buffer or string in HTML-CSS format. .. versionadded:: 1.3.0 Parameters ---------- %(buf)s table_uuid : str, optional Id attribute assigned to the HTML element in the format: ` elements. exclude_styles : bool, default False Whether to include the bufNone`. See Also -------- DataFrame.to_html: Write a DataFrame to a file, buffer or string in HTML format. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]}) >>> print(df.style.to_html()) # doctest: +SKIP   A B ...", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\style.py", + "ast_data": "FunctionDef name:to_html arg:self arg:buf arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call If Call If Call If Compare Assign Call If Compare Assign Call If Call If Compare Call Assign Call BoolOp Call Return return:yes Call Compare Call" + }, + { + "library": "numpy", + "name": "_slice_at_axis", + "source_code": "def _slice_at_axis(sl, axis):\n return (slice(None),) * axis + (sl,) + (...,)", + "docstring": "Construct tuple of slices to slice an array in the given dimension. Parameters ---------- sl : slice The slice for the given dimension. axis : int The axis to which is applied. All other dimensions are left \"unsliced\". Returns ------- sl : tuple of slices A tuple with slices matching in length. 
Examples -------- >>> np._slice_at_axis(slice(None, 3, -1), 1) (slice(None, None, None), slice(None, 3, -1), (...,))", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_arraypad_impl.py", + "ast_data": "FunctionDef name:_slice_at_axis arg:sl arg:axis arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "task_id", + "source_code": "@task_id.setter\ndef task_id(self, task_id):\n self._task_id = task_id", + "docstring": "Setter of property. See property doc.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py", + "ast_data": "FunctionDef name:task_id arg:self arg:task_id arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "convert_fields", + "source_code": "def convert_fields(fields, field_values):\n _convert_fields(fields, field_values, context=_ConversionContext.VALUE)", + "docstring": "Type-checks and converts each field in (in place). Args: fields: A list of objects. field_values: A mapping field names to values. Must contain an entry for each field. I.e., must be equal to . Raises: ValueError: If the keys of do not match the names of the fields in . TypeError: If any value in does not have the type indicated by the corresponding object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type_field.py", + "ast_data": "FunctionDef name:convert_fields arg:fields arg:field_values arguments arg arg Call" + }, + { + "library": "uvicorn", + "name": "ProxyHeadersMiddleware", + "source_code": "class ProxyHeadersMiddleware:\n\n def __init__(self, app: ASGI3Application, trusted_hosts: list[str] | str='127.0.0.1') -> None:\n self.app = app\n self.trusted_hosts = _TrustedHosts(trusted_hosts)\n\n async def __call__(self, scope: Scope, receive: ASGIReceiveCallable, send: ASGISendCallable) -> None:\n if scope['type'] == 'lifespan':\n return await self.app(scope, receive, send)\n client_addr = scope.get('client')\n client_host = client_addr[0] if client_addr else None\n if client_host in self.trusted_hosts:\n headers = dict(scope['headers'])\n if b'x-forwarded-proto' in headers:\n x_forwarded_proto = headers[b'x-forwarded-proto'].decode('latin1').strip()\n if x_forwarded_proto in {'http', 'https', 'ws', 'wss'}:\n if scope['type'] == 'websocket':\n scope['scheme'] = x_forwarded_proto.replace('http', 'ws')\n else:\n scope['scheme'] = x_forwarded_proto\n if b'x-forwarded-for' in headers:\n x_forwarded_for = headers[b'x-forwarded-for'].decode('latin1')\n host = self.trusted_hosts.get_trusted_client_host(x_forwarded_for)\n if host:\n port = 0\n scope['client'] = (host, port)\n return await self.app(scope, receive, send)", + "docstring": "Middleware for handling known proxy headers This middleware can be used when a known proxy is fronting the application, and is trusted to be properly setting the and headers with the connecting client information. Modifies the and information so that they reference the connecting client, rather that the connecting proxy. 
References: - -", + "type": "class", + "file_path": "uvicorn\\uvicorn\\middleware\\proxy_headers.py", + "ast_data": "ClassDef name:ProxyHeadersMiddleware FunctionDef name:__init__ arg:self arg:app arg:trusted_hosts arguments arg arg arg Assign Assign Call AsyncFunctionDef name:__call__ arg:self arg:scope arg:receive arg:send arguments arg arg arg arg If Compare Return return:yes Call Assign Call Assign If Compare Assign Call If Compare Assign Call Call If Compare If Compare Assign Call Assign If Compare Assign Call Assign Call If Assign Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "approx_fprime", + "source_code": "def approx_fprime(xk, f, epsilon=_epsilon, *args):\n xk = np.asarray(xk, float)\n f0 = f(xk, *args)\n return approx_derivative(f, xk, method='2-point', abs_step=epsilon, args=args, f0=f0)", + "docstring": "Finite difference approximation of the derivatives of a scalar or vector-valued function. If a function maps from :math: to :math:, its derivatives form an m-by-n matrix called the Jacobian, where an element :math: is a partial derivative of f[i] with respect to `fxkargsxkf(m, n)xkxkffxkx` should be an array of size two.\" ... return c0 * x[0]**2 + c1*x[1]**2 >>> x = np.ones(2) >>> c0, c1 = (1, 200) >>> eps = np.sqrt(np.finfo(float).eps) >>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1) array([ 2. , 400.00004208])", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_optimize.py", + "ast_data": "FunctionDef name:approx_fprime arg:xk arg:f arg:epsilon arguments arg arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "__init__", + "source_code": "def __init__(self, default: Any, rebuild: _ConfigRebuild, valid_types: _OptValidTypes, description: str='') -> None:\n super().__setattr__('default', default)\n super().__setattr__('rebuild', rebuild)\n super().__setattr__('valid_types', valid_types)\n super().__setattr__('description', description)", + "docstring": "Configuration option type for Sphinx. The type is intended to be immutable; changing the field values is an unsupported action. No validation is performed on the values, though consumers will likely expect them to be of the types advertised. 
The old tuple-based interface will be removed in Sphinx 9.", + "type": "method", + "file_path": "sphinx\\sphinx\\config.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:default arg:rebuild arg:valid_types arg:description arguments arg arg arg arg arg Call Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_modify_model_input_type", + "source_code": "def _modify_model_input_type(model, inference_input_type=dtypes.float32):\n if inference_input_type == dtypes.float32:\n return\n if not model.signatureDefs:\n _modify_model_input_type_per_subgraph(model, 0, -1, inference_input_type)\n return\n for signature_index, signature_def in enumerate(model.signatureDefs):\n _modify_model_input_type_per_subgraph(model, signature_def.subgraphIndex, signature_index, inference_input_type)", + "docstring": "Modify model input type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py", + "ast_data": "FunctionDef name:_modify_model_input_type arg:model arg:inference_input_type arguments arg arg If Compare Return return:no If Call Return return:no For Call Call" + }, + { + "library": "pytorch", + "name": "check_prologue_fusion_heuristics_fusable", + "source_code": "def check_prologue_fusion_heuristics_fusable(self, prologue_node: BaseSchedulerNode, template_node: BaseSchedulerNode, why: WhyNoFuse) -> bool:\n if prologue_node.get_operation_names() <= V.graph.invoke_quant_ops:\n return True\n read_bytes = prologue_node.get_read_buffer_sizes()\n write_bytes = prologue_node.get_write_buffer_sizes()\n BYTES_THRESHOLD_MULTIPLIER = 1.1\n if read_bytes > write_bytes * BYTES_THRESHOLD_MULTIPLIER:\n why('prologue fusion will not increase amount of bytes read in kernel')\n return False\n origins = tuple((e.target for n in prologue_node.get_nodes() if n.node is not None for e in n.node.get_origins() if e.op == 'call_function'))\n if origins == (torch.ops.aten.constant_pad_nd.default,):\n why('prologue fusion will not increase attempt to fuse in padding bc it increases unaligned reads')\n return False\n\n def low_prec_fp(dtype: torch.dtype) -> bool:\n return dtype.itemsize <= 2 and dtype.is_floating_point\n if low_prec_fp(template_node.get_template_node_or_throw().dtype) and (not prologue_node.can_codegen_in_low_precision()):\n why('prologue fusion that must be upcast to fp32 not profitable for low precision templates')\n return False\n return True", + "docstring": "Heuristics to avoid benchmarking predictably slow prologue fusions", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:check_prologue_fusion_heuristics_fusable arg:self arg:prologue_node arg:template_node arg:why arguments arg arg arg arg If Compare Call Return return:yes Assign Call Assign Call Assign If Compare Call Return return:yes Assign Call Call Compare Call Compare If Compare Call Return return:yes FunctionDef name:low_prec_fp arg:dtype arguments arg Return return:yes BoolOp Compare If BoolOp Call Call Call Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_verts", + "source_code": "def get_verts(self):\n trans = self.get_transform()\n path = self.get_path()\n polygons = path.to_polygons(trans)\n if len(polygons):\n return polygons[0]\n return []", + "docstring": "Return a copy of the vertices used in this patch. If the patch contains Bézier curves, the curves will be interpolated by line segments. 
To access the curves as curves, use .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:get_verts arg:self arguments arg Assign Call Assign Call Assign Call If Call Return return:yes Return return:no" + }, + { + "library": "scipy", + "name": "getrow", + "source_code": "def getrow(self, i):\n return self._getrow(i)", + "docstring": "Returns a copy of row i of the matrix, as a (1 x n) sparse matrix (row vector).", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_matrix.py", + "ast_data": "FunctionDef name:getrow arg:self arg:i arguments arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "autoscale", + "source_code": "def autoscale(self):\n dmin, dmax = self.datalim_to_dt()\n vmin = mdates.date2num(dmin)\n vmax = mdates.date2num(dmax)\n return self.nonsingular(vmin, vmax)", + "docstring": "Set the view limits to include the data range.", + "type": "method", + "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py", + "ast_data": "FunctionDef name:autoscale arg:self arguments arg Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "KeyEvent", + "source_code": "class KeyEvent(LocationEvent):\n\n def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):\n super().__init__(name, canvas, x, y, guiEvent=guiEvent)\n self.key = key", + "docstring": "A key event (key press, key release). A KeyEvent has a number of special attributes in addition to those defined by the parent and classes. Attributes ---------- key : None or str The key(s) pressed. Could be *None*, a single case sensitive Unicode character (\"g\", \"G\", \"#\", etc.), a special key (\"control\", \"shift\", \"f1\", \"up\", etc.) or a combination of the above (e.g., \"ctrl+alt+g\", \"ctrl+alt+G\"). Notes ----- Modifier keys will be prefixed to the pressed key and will be in the order \"ctrl\", \"alt\", \"super\". The exception to this rule is when the pressed key is itself a modifier key, therefore \"ctrl+alt\" and \"alt+control\" can both be valid key values. Examples -------- :: def on_key(event): print('you pressed', event.key, event.xdata, event.ydata) cid = fig.canvas.mpl_connect('key_press_event', on_key)", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "ClassDef name:KeyEvent FunctionDef name:__init__ arg:self arg:name arg:canvas arg:key arg:x arg:y arg:guiEvent arguments arg arg arg arg arg arg arg Call Call Assign" + }, + { + "library": "scipy", + "name": "include_event", + "source_code": "def include_event(event, options):\n return event['ph'] == 'X' and event['dur'] >= options['granularity'] and (not event['name'].startswith('Total'))", + "docstring": "Only include events if they are complete events, are longer than granularity, and are not totals.", + "type": "function", + "file_path": "scipy\\tools\\ninjatracing.py", + "ast_data": "FunctionDef name:include_event arg:event arg:options arguments arg arg Return return:yes BoolOp Compare Compare Call" + }, + { + "library": "pandas", + "name": "set_flags", + "source_code": "@final\ndef set_flags(self, *, copy: bool | lib.NoDefault=lib.no_default, allows_duplicate_labels: bool | None=None) -> Self:\n self._check_copy_deprecation(copy)\n df = self.copy(deep=False)\n if allows_duplicate_labels is not None:\n df.flags['allows_duplicate_labels'] = allows_duplicate_labels\n return df", + "docstring": "Return a new object with updated flags. 
This method creates a shallow copy of the original object, preserving its underlying data while modifying its global flags. In particular, it allows you to update properties such as whether duplicate labels are permitted. This behavior is especially useful in method chains, where one wishes to adjust DataFrame or Series characteristics without altering the original object. Parameters ---------- copy : bool, default False Specify if a copy of the object should be made. .. note:: The keyword will change behavior in pandas 3.0. __ will be enabled by default, which means that all methods with a keyword will use a lazy copy mechanism to defer the copy and ignore the keyword. The keyword will be removed in a future version of pandas. You can already get the future behavior and improvements through enabling copy on write `DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2]}) >>> df.flags.allows_duplicate_labels True >>> df2 = df.set_flags(allows_duplicate_labels=False) >>> df2.flags.allows_duplicate_labels False", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:set_flags arg:self arguments arg arg arg Call Assign Call If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "initial_value", + "source_code": "@property\ndef initial_value(self):\n return self._initial_value", + "docstring": "Returns the Tensor used as the initial value for the variable. Note that this is different from which runs the op that initializes the variable before returning its value. This method returns the tensor that is used by the op that initializes the variable. Returns: A .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", + "ast_data": "FunctionDef name:initial_value arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "create_profiler_ui", + "source_code": "def create_profiler_ui(graph, run_metadata, ui_type='readline', on_ui_exit=None, config=None):\n del config\n analyzer = ProfileAnalyzer(graph, run_metadata)\n cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit)\n cli.register_command_handler('list_profile', analyzer.list_profile, analyzer.get_help('list_profile'), prefix_aliases=['lp'])\n cli.register_command_handler('print_source', analyzer.print_source, analyzer.get_help('print_source'), prefix_aliases=['ps'])\n return cli", + "docstring": "Create an instance of ReadlineUI based on a and . Args: graph: Python object. run_metadata: A protobuf object. ui_type: (str) requested UI type, e.g., \"readline\". on_ui_exit: () the callback to be called when the UI exits. config: An instance of . 
Returns: (base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer commands and tab-completions registered.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\profile_analyzer_cli.py", + "ast_data": "FunctionDef name:create_profiler_ui arg:graph arg:run_metadata arg:ui_type arg:on_ui_exit arg:config arguments arg arg arg arg arg Assign Call Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "saveable_object_to_tensor_dict", + "source_code": "def saveable_object_to_tensor_dict(saveables):\n tensor_dict = {}\n for saveable in saveables:\n for spec in saveable.specs:\n name = _convert_to_string(spec.name)\n slice_spec = _convert_to_string(spec.slice_spec)\n tensor = spec if callable(spec._tensor) else spec._tensor\n if slice_spec:\n tensor_dict.setdefault(name, {})[slice_spec] = tensor\n else:\n tensor_dict[name] = tensor\n return tensor_dict", + "docstring": "Converts a list of SaveableObjects to a tensor dictionary.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", + "ast_data": "FunctionDef name:saveable_object_to_tensor_dict arg:saveables arguments arg Assign For For Assign Call Assign Call Assign Call If Assign Call Assign Return return:yes" + }, + { + "library": "django", + "name": "unregister_serializer", + "source_code": "def unregister_serializer(format):\n if not _serializers:\n _load_serializers()\n if format not in _serializers:\n raise SerializerDoesNotExist(format)\n del _serializers[format]", + "docstring": "Unregister a given serializer. This is not a thread-safe operation.", + "type": "function", + "file_path": "django\\django\\core\\serializers\\__init__.py", + "ast_data": "FunctionDef name:unregister_serializer arg:format arguments arg If Call If Compare Raise Call" + }, + { + "library": "numpy", + "name": "std", + "source_code": "def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, mean=np._NoValue):\n kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}\n dvar = self.var(axis, dtype, out, ddof, **kwargs)\n if dvar is not masked:\n if out is not None:\n np.power(out, 0.5, out=out, casting='unsafe')\n return out\n dvar = sqrt(dvar)\n return dvar", + "docstring": "Returns the standard deviation of the array elements along given axis. Masked entries are ignored. Refer to for full documentation. See Also -------- numpy.ndarray.std : corresponding function for ndarrays numpy.std : Equivalent function", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:std arg:self arg:axis arg:dtype arg:out arg:ddof arg:keepdims arg:mean arguments arg arg arg arg arg arg arg Assign Compare Assign Call If Compare If Compare Call Return return:yes Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_ref_args", + "source_code": "def _get_ref_args(self, node):\n op_def = op_def_registry.get(node.op)\n if op_def is None:\n return []\n ref_args = []\n for i, output_arg in enumerate(op_def.output_arg):\n if output_arg.is_ref:\n arg_name = node.name if i == 0 else '%s:%d' % (node.name, i)\n ref_args.append(arg_name)\n return ref_args", + "docstring": "Determine whether an input of an op is ref-type. Args: node: A . 
Returns: A list of the arg names (as strs) that are ref-type.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py", + "ast_data": "FunctionDef name:_get_ref_args arg:self arg:node arguments arg arg Assign Call If Compare Return return:no Assign For Call If Assign Compare Call Return return:yes" + }, + { + "library": "sphinx", + "name": "_desc_classes_injector", + "source_code": "class _desc_classes_injector(nodes.Element, not_smartquotable):\n classes: list[str] = []\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self['classes'].extend(self.classes)", + "docstring": "Helper base class for injecting a fixed list of classes. Use as the first base class.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:_desc_classes_injector FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "find_all_hinted_output_nodes", + "source_code": "def find_all_hinted_output_nodes(session=None, graph_def=None):\n if session is not None and graph_def is not None:\n raise ValueError('Provide only one of session and graph_def.')\n hinted_outputs_nodes = []\n if session is not None:\n hints = _find_all_hints_in_nodes(session.graph_def.node)\n elif graph_def is not None:\n hints = _find_all_hints_in_nodes(graph_def.node)\n for hint in hints.values():\n _, output_nodes = hint.flattened_inputs_and_outputs()\n hinted_outputs_nodes.extend(output_nodes)\n return hinted_outputs_nodes", + "docstring": "Find all Ophints output nodes in the graph. This is used to get all the output nodes those are ophinted, it is important for operation like convert_variables_to_constants keep all ophints structure. Note: only one of session or graph_def should be used, not both. Why this can be useful? Some TensorFlow ops (e.g. bidirectional rnn), can generate multiple outputs for unfused subgraph. If not all output nodes are consumed, graph optimization can potentially drop the unused nodes and cause ophints in an invalid states (due to missing ophinted output nodes). So it's important for us to find all those hinted output nodes and make sure they're not discarded away. Args: session: A TensorFlow session that contains the graph to convert. graph_def: A graph def that we should convert. Returns: A list of OpHints output nodes. Raises: ValueError: If both session and graph_def are provided.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py", + "ast_data": "FunctionDef name:find_all_hinted_output_nodes arg:session arg:graph_def arguments arg arg If BoolOp Compare Compare Raise Call Assign If Compare Assign Call If Compare Assign Call For Call Assign Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "score", + "source_code": "def score(self, X, y, sample_weight=None):\n return super().score(X, y, sample_weight)", + "docstring": "Return the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : array-like of shape (n_samples, n_features), or None Test samples. If , predictions for all indexed points are used; in this case, points are not considered their own neighbors. This means that implicitly performs a leave-one-out cross-validation procedure and is equivalent to but typically much faster. 
y : array-like of shape (n_samples,) or (n_samples, n_outputs) True labels for . sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float Mean accuracy of `y`.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neighbors\\_classification.py", + "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_is_safe_to_split", + "source_code": "def _is_safe_to_split() -> bool:\n return False if _get_default_group().bound_device_id is None else True", + "docstring": "Checks if it is safe to split the any process group in the world. This is only safe if the default pg has a bound device id, otherwise users must be aware that a pg is only splittable after the first collective is issued.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:_is_safe_to_split arguments Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "inbound_nodes", + "source_code": "@property\n@doc_controls.do_not_doc_inheritable\ndef inbound_nodes(self):\n return self._inbound_nodes", + "docstring": "Deprecated, do NOT use! Only for compatibility with external Keras.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:inbound_nodes arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "shape", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef shape(x):\n return array_ops.shape(x)", + "docstring": "Returns the symbolic shape of a tensor or variable. Args: x: A tensor or variable. Returns: A symbolic shape (which is itself a tensor). 
Examples: >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val) >>> tf.keras.backend.shape(kvar) >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> tf.keras.backend.shape(input)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:shape arg:x arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "load", + "source_code": "def load(export_dir, tags, skip_restoring_checkpoint=False):\n metrics.IncrementReadApi(_LOAD_V1_V2_LABEL)\n loader = _EagerSavedModelLoader(export_dir)\n result = loader.load(tags=tags, skip_restoring_checkpoint=skip_restoring_checkpoint)\n metrics.IncrementRead(write_version='1')\n return result", + "docstring": "Load a v1-style SavedModel as an object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load_v1_in_v2.py", + "ast_data": "FunctionDef name:load arg:export_dir arg:tags arg:skip_restoring_checkpoint arguments arg arg arg Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "Parser", + "source_code": "class Parser:\n\n def __init__(self, code: str, encoding: str='utf-8') -> None:\n self.code = filter_whitespace(code)\n self.encoding = encoding\n self.annotations: dict[tuple[str, str], str] = {}\n self.comments: dict[tuple[str, str], str] = {}\n self.deforders: dict[str, int] = {}\n self.definitions: dict[str, tuple[str, int, int]] = {}\n self.finals: list[str] = []\n self.overloads: dict[str, list[Signature]] = {}\n\n def parse(self) -> None:\n self.parse_comments()\n self.parse_definition()\n\n def parse_comments(self) -> None:\n tree = ast.parse(self.code, type_comments=True)\n picker = VariableCommentPicker(self.code.splitlines(True), self.encoding)\n picker.visit(tree)\n self.annotations = picker.annotations\n self.comments = picker.comments\n self.deforders = picker.deforders\n self.finals = picker.finals\n self.overloads = picker.overloads\n\n def parse_definition(self) -> None:\n parser = DefinitionFinder(self.code.splitlines(True))\n parser.parse()\n self.definitions = parser.definitions", + "docstring": "Python source code parser to pick up variable comments. This is a better wrapper for ``.", + "type": "class", + "file_path": "sphinx\\sphinx\\pycode\\parser.py", + "ast_data": "ClassDef name:Parser FunctionDef name:__init__ arg:self arg:code arg:encoding arguments arg arg arg Assign Call Assign FunctionDef name:parse arg:self arguments arg Call Call FunctionDef name:parse_comments arg:self arguments arg Assign Call Assign Call Call Call Assign Assign Assign Assign Assign FunctionDef name:parse_definition arg:self arguments arg Assign Call Call Call Assign" + }, + { + "library": "pytorch", + "name": "_prepare_input_for_pytorch", + "source_code": "def _prepare_input_for_pytorch(args, kwargs):\n if isinstance(args, (torch.Tensor, dict)):\n args = (args,)\n args = copy.deepcopy(args)\n if kwargs:\n kwargs = copy.deepcopy(kwargs)\n else:\n kwargs = {}\n return (args, kwargs)", + "docstring": "Prepare input for PyTorch model execution. Any future changes/formatting to the input before dispatching to the PyTorch model should be made in this function. Args: args: positional arguments for PyTorch model forward method. kwargs: keyword arguments for PyTorch model forward method. Returns: args: positional arguments for PyTorch model forward method. 
kwargs: keyword arguments for PyTorch model forward method.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\verification.py", + "ast_data": "FunctionDef name:_prepare_input_for_pytorch arg:args arg:kwargs arguments arg arg If Call Assign Assign Call If Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "relu_layer", + "source_code": "@tf_export(v1=['nn.relu_layer'])\n@dispatch.add_dispatch_support\ndef relu_layer(x, weights, biases, name=None):\n with ops.name_scope(name, 'relu_layer', [x, weights, biases]) as name:\n x = ops.convert_to_tensor(x, name='x')\n weights = ops.convert_to_tensor(weights, name='weights')\n biases = ops.convert_to_tensor(biases, name='biases')\n xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)\n return nn_ops.relu(xw_plus_b, name=name)", + "docstring": "Computes Relu(x * weight + biases). Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified \"nn_relu_layer\" is used. Returns: A 2-D Tensor computing relu(matmul(x, weights) + biases). Dimensions typically: batch, out_units.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl.py", + "ast_data": "FunctionDef name:relu_layer arg:x arg:weights arg:biases arg:name arguments arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "pygame", + "name": "array_green", + "source_code": "def array_green(surface):\n size = surface.get_size()\n array = numpy.empty(size, numpy.uint8)\n surface_to_array(array, surface, 'G')\n return array", + "docstring": "pygame.surfarray.array_green(Surface): return array copy pixel green into a 2d array Copy the pixel green values from a Surface into a 2D array. This will work for any type of Surface format. 
This function will temporarily lock the Surface as pixels are copied (see the Surface.lock - lock the Surface memory for pixel access method).", + "type": "function", + "file_path": "pygame\\src_py\\surfarray.py", + "ast_data": "FunctionDef name:array_green arg:surface arguments arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "get", + "source_code": "def get(self, name: str) -> Theme:\n if name in self.themes:\n theme = self.themes[name]\n else:\n theme = self.find_user_theme(name) or Theme(name)\n theme.update(self.config)\n return theme", + "docstring": "Get a theme for given *name*.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\latex\\theming.py", + "ast_data": "FunctionDef name:get arg:self arg:name arguments arg arg If Compare Assign Assign BoolOp Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_start_trajectory", + "source_code": "def _start_trajectory(self, xm, ym, broken_streamlines=True):\n self._traj = []\n self._update_trajectory(xm, ym, broken_streamlines)", + "docstring": "Start recording streamline trajectory", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\streamplot.py", + "ast_data": "FunctionDef name:_start_trajectory arg:self arg:xm arg:ym arg:broken_streamlines arguments arg arg arg arg Assign Call" + }, + { + "library": "tensorflow", + "name": "grad", + "source_code": "def grad(self, source, flow=None, name=None):\n raise NotImplementedError()", + "docstring": "Not supported.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:grad arg:self arg:source arg:flow arg:name arguments arg arg arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "prev", + "source_code": "@property\ndef prev(self) -> 'Node':\n return self._prev", + "docstring": "Returns the previous `` in the linked list of Nodes.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\node.py", + "ast_data": "FunctionDef name:prev arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "xlim", + "source_code": "def xlim(*args, **kwargs) -> tuple[float, float]:\n ax = gca()\n if not args and (not kwargs):\n return ax.get_xlim()\n ret = ax.set_xlim(*args, **kwargs)\n return ret", + "docstring": "Get or set the x limits of the current Axes. Call signatures:: left, right = xlim() # return the current xlim xlim((left, right)) # set the xlim to left, right xlim(left, right) # set the xlim to left, right If you do not specify args, you can pass *left* or *right* as kwargs, i.e.:: xlim(right=3) # adjust the right leaving left unchanged xlim(left=1) # adjust the left leaving right unchanged Setting limits turns autoscaling off for the x-axis. Returns ------- left, right A tuple of the new x-axis limits. Notes ----- Calling this function with no arguments (e.g. `~.Axes.get_xlim~.Axes.set_xlim` on the current Axes. 
All arguments are passed though.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:xlim arguments arg arg Assign Call If BoolOp Return return:yes Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "_get_to_python", + "source_code": "def _get_to_python(self, field):\n while field.remote_field is not None:\n field = field.remote_field.get_related_field()\n return field.to_python", + "docstring": "If the field is a related field, fetch the concrete field's (that is, the ultimate pointed-to field's) to_python.", + "type": "method", + "file_path": "django\\django\\forms\\models.py", + "ast_data": "FunctionDef name:_get_to_python arg:self arg:field arguments arg arg While Compare Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "hermegrid2d", + "source_code": "def hermegrid2d(x, y, c):\n return pu._gridnd(hermeval, c, x, y)", + "docstring": "Evaluate a 2-D HermiteE series on the Cartesian product of x and y. This function returns the values: .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) where the points `axbyxyxyxyccxyxycxy`. See Also -------- hermeval, hermeval2d, hermeval3d, hermegrid3d", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", + "ast_data": "FunctionDef name:hermegrid2d arg:x arg:y arg:c arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_device_assignments", + "source_code": "@property\ndef _device_assignments(self) -> list[traceable_stack.TraceableObject]:\n return self._device_code_locations or []", + "docstring": "Code locations for device context managers active at op creation. This property will return a list of traceable_stack.TraceableObject instances where .obj is a string representing the assigned device (or information about the function that would be applied to this op to compute the desired device) and the filename and lineno members record the location of the relevant device context manager. For example, suppose file_a contained these lines: file_a.py: 15: with tf.device('/gpu:0'): 16: node_b = tf.constant(4, name='NODE_B') Then a TraceableObject t_obj representing the device context manager would have these member values: t_obj.obj -> '/gpu:0' t_obj.filename = 'file_a.py' t_obj.lineno = 15 and node_b.op._device_assignments would return the list [t_obj]. Returns: [str: traceable_stack.TraceableObject, ...] as per this method's description, above.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_device_assignments arg:self arguments arg Return return:yes BoolOp" + }, + { + "library": "tensorflow", + "name": "wrap_function", + "source_code": "def wrap_function(self, fn, signature, name=None):\n return self._wrap_function(fn, signature=signature, name=name)", + "docstring": "Wraps a TF 1.X function and returns an eager-compatible function. All functions wrapped in the same will have access to the same graph ( to get the graph object within a function, or to get the graph outside a function). Variables created within the function will be added to the list. Function inputs: All inputs to the function must be tensors (nested ok), with their shapes and dtypes defined in the argument. Function outputs: * The 1.X function may return tensors, variables, and ops. The wrapped eager-compatible function will always return tensors in the same nested structure. 
* Variables are replaced with a tensor containing the latest read values. * Returned ops are executed, and replaced with None. * The order of op execution and variable reads in the return is nondeterministic. For example: To ensure that ops in the function are executed (e.g. ops added to the collection), include them in the function returns. Args: fn: a 1.X tensorflow function. signature: a possibly nested sequence of specifying the shapes and dtypes of the arguments. name: an optional string name for the function. The function will be saved with key in the dictionary. Returns: An eager-compatible function.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\wrap_function.py", + "ast_data": "FunctionDef name:wrap_function arg:self arg:fn arg:signature arg:name arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "get_blas_macro_and_name", + "source_code": "def get_blas_macro_and_name(name, accelerate):\n if accelerate:\n if name in USE_OLD_ACCELERATE:\n return ('', f'{name}_')\n elif name == 'xerbla_array':\n return ('', name + '__')\n if name in WRAPPED_FUNCS:\n name = name + 'wrp'\n return ('F_FUNC', f'{name},{name.upper()}')\n return ('BLAS_FUNC', name)", + "docstring": "Complex-valued and some Accelerate functions have special symbols.", + "type": "function", + "file_path": "scipy\\scipy\\_build_utils\\_wrappers_common.py", + "ast_data": "FunctionDef name:get_blas_macro_and_name arg:name arg:accelerate arguments arg arg If If Compare Return return:yes If Compare Return return:yes If Compare Assign Return return:yes Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "partial_fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y, classes=None, sample_weight=None):\n return self._partial_fit(X, y, classes, _refit=False, sample_weight=sample_weight)", + "docstring": "Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once. This method has some performance and numerical stability overhead, hence it is better to call partial_fit on chunks of data that are as large as possible (as long as fitting in the memory budget) to hide the overhead. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target values. classes : array-like of shape (n_classes,), default=None List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). .. 
versionadded:: 0.17 Returns ------- self : object Returns the instance itself.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\naive_bayes.py", + "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arg:classes arg:sample_weight arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "reduce_scatter", + "source_code": "def reduce_scatter(output, input_list, op=ReduceOp.SUM, group=group.WORLD):\n return _Reduce_Scatter.apply(op, group, output, *input_list)", + "docstring": "Reduces, then scatters a list of tensors to all processes in a group. Arguments: output (Tensor): Output tensor. input_list (list[Tensor]): List of tensors to reduce and scatter. op (optional): One of the values from `` enum. Specifies an operation used for element-wise reductions. group (ProcessGroup, optional): The process group to work on. Returns: Tensor: Output of the collective.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\nn\\functional.py", + "ast_data": "FunctionDef name:reduce_scatter arg:output arg:input_list arg:op arg:group arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "convert", + "source_code": "@_export_metrics\ndef convert(self):\n graph_def, input_tensors, output_tensors = self._load_saved_model(self.saved_model_dir, self._saved_model_tags)\n if self.saved_model_dir is None or not self.experimental_new_converter:\n graph_def, _, _, _ = _freeze_saved_model(self.saved_model_dir, None, None, None, self._saved_model_tags, _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)\n self.saved_model_dir = None\n return super(TFLiteSavedModelConverterV2, self).convert(graph_def, input_tensors, output_tensors)\n trackable_obj = _load(self.saved_model_dir, self._saved_model_tags)\n if trackable_obj is None:\n self._debug_info = _get_debug_info(_build_debug_info_func(self._funcs[0].graph), graph_def)\n else:\n self._debug_info = _get_debug_info(_convert_debug_info_func(trackable_obj.graph_debug_info), graph_def)\n del trackable_obj\n gc.collect()\n return self._convert_from_saved_model(graph_def)", + "docstring": "Converts a TensorFlow GraphDef based on instance variables. Returns: The converted data in serialized format. Raises: ValueError: No concrete function is specified. Multiple concrete functions are specified. Input shape is not specified. 
Invalid quantization parameters.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:convert arg:self arguments arg Assign Call If BoolOp Compare Assign Call Assign Return return:yes Call Call Assign Call If Compare Assign Call Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "lazystr", + "source_code": "def lazystr(text):\n return lazy(str, str)(text)", + "docstring": "Shortcut for the common case of a lazy callable that returns str.", + "type": "function", + "file_path": "django\\django\\utils\\functional.py", + "ast_data": "FunctionDef name:lazystr arg:text arguments arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "_get_indent", + "source_code": "def _get_indent(lines):\n indent = sys.maxsize\n for line in lines:\n content = len(line.lstrip())\n if content:\n indent = min(indent, len(line) - content)\n if indent == sys.maxsize:\n indent = 0\n return indent", + "docstring": "Determines the leading whitespace that could be removed from all the lines.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_utils_impl.py", + "ast_data": "FunctionDef name:_get_indent arg:lines arguments arg Assign For Assign Call Call If Assign Call Call If Compare Assign Return return:yes" + }, + { + "library": "scipy", + "name": "roots_sh_chebyu", + "source_code": "def roots_sh_chebyu(n, mu=False):\n x, w, m = roots_chebyu(n, True)\n x = (x + 1) / 2\n m_us = _ufuncs.beta(1.5, 1.5)\n w *= m_us / m\n if mu:\n return (x, w, m_us)\n else:\n return (x, w)", + "docstring": "Gauss-Chebyshev (second kind, shifted) quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. The sample points are the roots of the nth degree shifted Chebyshev polynomial of the second kind, :math:. These sample points and weights correctly integrate polynomials of degree :math: or less over the interval :math: with weight function :math:. See 22.2.9 in [AS]_ for more details. Parameters ---------- n : int quadrature order mu : bool, optional If True, return the sum of the weights, optional. Returns ------- x : ndarray Sample points w : ndarray Weights mu : float Sum of the weights See Also -------- scipy.integrate.fixed_quad References ---------- .. [AS] Milton Abramowitz and Irene A. Stegun, eds. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables. 
New York: Dover, 1972.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_orthogonal.py", + "ast_data": "FunctionDef name:roots_sh_chebyu arg:n arg:mu arguments arg arg Assign Call Assign Assign Call If Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self):\n self.root = TrieNode()\n self._hash = hashlib.md5(usedforsecurity=False)\n self._digest = self._hash.digest()", + "docstring": "Initialize the trie with an empty root node.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Call Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "stack_trace", + "source_code": "@property\ndef stack_trace(self) -> str | None:\n return self.fx_node.meta.get('stack_trace')", + "docstring": "Returns the stack trace associated with this node.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py", + "ast_data": "FunctionDef name:stack_trace arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "decision_path", + "source_code": "def decision_path(self, X, check_input=True):\n X = self._validate_X_predict(X, check_input)\n return self.tree_.decision_path(X)", + "docstring": "Return the decision path in the tree. .. versionadded:: 0.18 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. Returns ------- indicator : sparse matrix of shape (n_samples, n_nodes) Return a node indicator CSR matrix where non zero elements indicates that the samples goes through the nodes.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\tree\\_classes.py", + "ast_data": "FunctionDef name:decision_path arg:self arg:X arg:check_input arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_set_dtype_policy", + "source_code": "def _set_dtype_policy(self, dtype):\n if isinstance(dtype, policy.Policy):\n self._dtype_policy = dtype\n elif isinstance(dtype, dict):\n self._dtype_policy = policy.deserialize(dtype)\n elif isinstance(dtype, str) and dtype in ('mixed_float16', 'mixed_bfloat16'):\n self._dtype_policy = policy.Policy(dtype)\n elif dtype:\n self._dtype_policy = policy.Policy(dtypes.as_dtype(dtype).name)\n else:\n self._dtype_policy = policy.global_policy()\n if self._dtype_policy.name == 'mixed_float16' and (not loss_scale_optimizer.strategy_supports_loss_scaling()):\n strategy = distribute_lib.get_strategy()\n raise ValueError('Mixed precision is not supported with the tf.distribute.Strategy: %s. Either stop using mixed precision by removing the use of the \"%s\" policy or use a different Strategy, e.g. a MirroredStrategy.' 
% (strategy.__class__.__name__, self._dtype_policy.name))\n if self._dtype_policy.compute_dtype:\n self._compute_dtype_object = dtypes.as_dtype(self._dtype_policy.compute_dtype)\n else:\n self._compute_dtype_object = None", + "docstring": "Sets self._dtype_policy.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:_set_dtype_policy arg:self arg:dtype arguments arg arg If Call Assign If Call Assign Call If BoolOp Call Compare Assign Call If Assign Call Call Assign Call If BoolOp Compare Call Assign Call Raise Call If Assign Call Assign" + }, + { + "library": "matplotlib", + "name": "contains_point", + "source_code": "def contains_point(self, point):\n return self.patch.contains_point(point, radius=1.0)", + "docstring": "Return whether *point* (pair of pixel coordinates) is inside the Axes patch.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:contains_point arg:self arg:point arguments arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "read_array", + "source_code": "def read_array(self, key: str, start: int | None=None, stop: int | None=None):\n import tables\n node = getattr(self.group, key)\n attrs = node._v_attrs\n transposed = getattr(attrs, 'transposed', False)\n if isinstance(node, tables.VLArray):\n ret = node[0][start:stop]\n dtype = getattr(attrs, 'value_type', None)\n if dtype is not None:\n ret = pd_array(ret, dtype=dtype)\n else:\n dtype = getattr(attrs, 'value_type', None)\n shape = getattr(attrs, 'shape', None)\n if shape is not None:\n ret = np.empty(shape, dtype=dtype)\n else:\n ret = node[start:stop]\n if dtype and dtype.startswith('datetime64'):\n tz = getattr(attrs, 'tz', None)\n ret = _set_tz(ret, tz, dtype)\n elif dtype == 'timedelta64':\n ret = np.asarray(ret, dtype='m8[ns]')\n if transposed:\n return ret.T\n else:\n return ret", + "docstring": "read an array for the specified node (off of group", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:read_array arg:self arg:key arg:start arg:stop arguments arg arg arg arg Assign Call Assign Assign Call If Call Assign Assign Call If Compare Assign Call Assign Call Assign Call If Compare Assign Call Assign If BoolOp Call Assign Call Assign Call If Compare Assign Call If Return return:yes Return return:yes" + }, + { + "library": "kornia", + "name": "top_hat", + "source_code": "def top_hat(tensor: torch.Tensor, kernel: torch.Tensor, structuring_element: Optional[torch.Tensor]=None, origin: Optional[List[int]]=None, border_type: str='geodesic', border_value: float=0.0, max_val: float=10000.0, engine: str='unfold') -> torch.Tensor:\n if not isinstance(tensor, torch.Tensor):\n raise TypeError(f'Input type is not a torch.Tensor. Got {type(tensor)}')\n if len(tensor.shape) != 4:\n raise ValueError(f'Input size must have 4 dimensions. Got {tensor.dim()}')\n if not isinstance(kernel, torch.Tensor):\n raise TypeError(f'Kernel type is not a torch.Tensor. Got {type(kernel)}')\n if len(kernel.shape) != 2:\n raise ValueError(f'Kernel size must have 2 dimensions. Got {kernel.dim()}')\n return tensor - opening(tensor, kernel=kernel, structuring_element=structuring_element, origin=origin, border_type=border_type, border_value=border_value, max_val=max_val, engine=engine)", + "docstring": "Return the top hat transformation of an image. .. 
image:: _static/img/top_hat.png That means, (image - opened_image) applying the same kernel in each channel. The kernel must have 2 dimensions. See :func: for details. Args: tensor: Image with shape :math:. kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give the set of neighbors of the center over which the operation is applied. Its shape is :math:. For full structural elements use torch.ones_like(structural_element). structuring_element: Structuring element used for the grayscale dilation. It may be a non-flat structuring element. origin: Origin of the structuring element. Default: `(B, C, H, W)here `__. Example: >>> tensor = torch.rand(1, 3, 5, 5) >>> kernel = torch.ones(3, 3) >>> top_hat_img = top_hat(tensor, kernel)", + "type": "function", + "file_path": "kornia\\kornia\\morphology\\morphology.py", + "ast_data": "FunctionDef name:top_hat arg:tensor arg:kernel arg:structuring_element arg:origin arg:border_type arg:border_value arg:max_val arg:engine arguments arg arg arg arg arg arg arg arg If Call Raise Call Call If Compare Call Raise Call Call If Call Raise Call Call If Compare Call Raise Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y=None, **params):\n if not _routing_enabled() and self.transform_input is not None:\n raise ValueError('The `transform_input` parameter can only be set if metadata routing is enabled. You can enable metadata routing using `sklearn.set_config(enable_metadata_routing=True)`.')\n routed_params = self._check_method_params(method='fit', props=params)\n Xt = self._fit(X, y, routed_params, raw_params=params)\n with _print_elapsed_time('Pipeline', self._log_message(len(self.steps) - 1)):\n if self._final_estimator != 'passthrough':\n last_step_params = self._get_metadata_for_step(step_idx=len(self) - 1, step_params=routed_params[self.steps[-1][0]], all_params=params)\n self._final_estimator.fit(Xt, y, **last_step_params['fit'])\n return self", + "docstring": "Fit the model. Fit all the transformers one after the other and sequentially transform the data. Finally, fit the transformed data using the final estimator. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **params : dict of str -> object - If (default): Parameters passed to the `enable_metadata_routing=Trueenable_metadata_routing=True~sklearn.set_configMetadata Routing User Guide ` for more details. 
Returns ------- self : object Pipeline with fitted steps.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\pipeline.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg If BoolOp Call Compare Raise Call Assign Call Assign Call With Call Call Call If Compare Assign Call Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "length_of_indexer", + "source_code": "def length_of_indexer(indexer, target=None) -> int:\n if target is not None and isinstance(indexer, slice):\n target_len = len(target)\n start = indexer.start\n stop = indexer.stop\n step = indexer.step\n if start is None:\n start = 0\n elif start < 0:\n start += target_len\n if stop is None or stop > target_len:\n stop = target_len\n elif stop < 0:\n stop += target_len\n if step is None:\n step = 1\n elif step < 0:\n start, stop = (stop + 1, start + 1)\n step = -step\n return (stop - start + step - 1) // step\n elif isinstance(indexer, (ABCSeries, ABCIndex, np.ndarray, list)):\n if isinstance(indexer, list):\n indexer = np.array(indexer)\n if indexer.dtype == bool:\n return indexer.sum()\n return len(indexer)\n elif isinstance(indexer, range):\n return (indexer.stop - indexer.start) // indexer.step\n elif not is_list_like_indexer(indexer):\n return 1\n raise AssertionError('cannot find the length of the indexer')", + "docstring": "Return the expected length of target[indexer] Returns ------- int", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexers\\utils.py", + "ast_data": "FunctionDef name:length_of_indexer arg:indexer arg:target arguments arg arg If BoolOp Compare Call Assign Call Assign Assign Assign If Compare Assign If Compare If BoolOp Compare Compare Assign If Compare If Compare Assign If Compare Assign Assign Return return:yes If Call If Call Assign Call If Compare Return return:yes Call Return return:yes Call If Call Return return:yes If Call Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "_rank_not_in_group", + "source_code": "def _rank_not_in_group(group: Optional[ProcessGroup]) -> bool:\n if group is None:\n return False\n return group == GroupMember.NON_GROUP_MEMBER", + "docstring": "Check if the current process's rank is not in a given group.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:_rank_not_in_group arg:group arguments arg If Compare Return return:yes Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, memory_profile):\n self.timeline = memory_profile.timeline\n self.categories = memory_profile._categories", + "docstring": "The minimum representation of the memory profile timeline includes the memory timeline and categories. The timeline consists of [timestamp, action, (TensorKey, version), numbytes] elements, to denote any actions (pre-existing, create, destroy, or increment_version) that occurred to a specific Tensor for a chunk of memory. 
The categories help map each (TensorKey, version) pair into a category.", + "type": "method", + "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:memory_profile arguments arg arg Assign Assign" + }, + { + "library": "pytorch", + "name": "StreamTransformExtension", + "source_code": "class StreamTransformExtension(Extension):\n\n @abc.abstractmethod\n def transform_to(self, output: IO[bytes]) -> IO[bytes]:\n pass\n\n @abc.abstractmethod\n def transform_from(self, input: IO[bytes]) -> IO[bytes]:\n pass", + "docstring": "An extension which performs transformation on a byte stream, such as compression or encryption. Implementations should try to be memory friendly and performant. For example, don't read the whole input, then transform it, and write it back. If at all possible, do it in chunks. But, don't read/transform/write one byte at a time, either.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\_extension.py", + "ast_data": "ClassDef name:StreamTransformExtension FunctionDef name:transform_to arg:self arg:output arguments arg arg FunctionDef name:transform_from arg:self arg:input arguments arg arg" + }, + { + "library": "django", + "name": "send_response", + "source_code": "async def send_response(self, response, send):\n response_headers = []\n for header, value in response.items():\n if isinstance(header, str):\n header = header.encode('ascii')\n if isinstance(value, str):\n value = value.encode('latin1')\n response_headers.append((bytes(header), bytes(value)))\n for c in response.cookies.values():\n response_headers.append((b'Set-Cookie', c.output(header='').encode('ascii').strip()))\n await send({'type': 'http.response.start', 'status': response.status_code, 'headers': response_headers})\n if response.streaming:\n async with aclosing(aiter(response)) as content:\n async for part in content:\n for chunk, _ in self.chunk_bytes(part):\n await send({'type': 'http.response.body', 'body': chunk, 'more_body': True})\n await send({'type': 'http.response.body'})\n else:\n for chunk, last in self.chunk_bytes(response.content):\n await send({'type': 'http.response.body', 'body': chunk, 'more_body': not last})", + "docstring": "Encode and send a response out over ASGI.", + "type": "method", + "file_path": "django\\django\\core\\handlers\\asgi.py", + "ast_data": "AsyncFunctionDef name:send_response arg:self arg:response arg:send arguments arg arg arg Assign For Call If Call Assign Call If Call Assign Call Call Call Call For Call Call Call Call Call Call If Call Call For Call Call Call For Call Call" + }, + { + "library": "tensorflow", + "name": "verify_single_cond_var", + "source_code": "def verify_single_cond_var(name, body_var, orelse_var):\n if body_var is None:\n raise ValueError(\"'{}' is None at the end of the main branch.\".format(name))\n if orelse_var is None:\n raise ValueError(\"'{}' is None at the end of the else branch.\".format(name))\n if isinstance(body_var, (bool, int, float, str, np.ndarray)):\n body_var = tensor_conversion.convert_to_tensor_v2(body_var)\n if isinstance(orelse_var, (bool, int, float, str, np.ndarray)):\n orelse_var = tensor_conversion.convert_to_tensor_v2(orelse_var)\n if not tensor_util.is_tf_type(body_var) or not tensor_util.is_tf_type(orelse_var):\n return\n if not hasattr(body_var, 'dtype') or not hasattr(orelse_var, 'dtype'):\n return\n if body_var.dtype != orelse_var.dtype:\n raise TypeError(\"'{}' has dtype {} in the main branch, but dtype {} in the else 
branch\".format(name, body_var.dtype.name, orelse_var.dtype.name))", + "docstring": "Verifies whether body_var and orelse_var are consistent.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py", + "ast_data": "FunctionDef name:verify_single_cond_var arg:name arg:body_var arg:orelse_var arguments arg arg arg If Compare Raise Call Call If Compare Raise Call Call If Call Assign Call If Call Assign Call If BoolOp Call Call Return return:no If BoolOp Call Call Return return:no If Compare Raise Call Call" + }, + { + "library": "matplotlib", + "name": "get_offset", + "source_code": "def get_offset(self):\n return self._offset", + "docstring": "Return offset of the container.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "FunctionDef name:get_offset arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, size=1, facecolor=None):\n facecolor = mpl._val_or_rc(facecolor, 'axes.edgecolor')\n self.size = size\n self._facecolor = facecolor\n super().__init__(size=size)", + "docstring": "Parameters ---------- size : float Size of the arrow as a fraction of the ticklabel size. facecolor : :mpltype:, default: :rc: Fill color. .. versionadded:: 3.7", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axisline_style.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:size arg:facecolor arguments arg arg arg Assign Call Assign Assign Call Call" + }, + { + "library": "pytorch", + "name": "realize", + "source_code": "def realize(self) -> VariableTracker:\n if self._cache.vt is None:\n self._cache.realize()\n assert self._cache.vt is not None\n return self._cache.vt", + "docstring": "Force construction of the real VariableTracker", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\variables\\lazy.py", + "ast_data": "FunctionDef name:realize arg:self arguments arg If Compare Call Compare Return return:yes" + }, + { + "library": "pandas", + "name": "_consolidate_inplace", + "source_code": "@final\ndef _consolidate_inplace(self) -> None:\n self._mgr = self._mgr.consolidate()", + "docstring": "Consolidate data in place and return None", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:_consolidate_inplace arg:self arguments arg Assign Call" + }, + { + "library": "tensorflow", + "name": "PerReplicaSpec", + "source_code": "class PerReplicaSpec(type_spec.TypeSpec):\n __slots__ = ['_value_specs']\n value_type = property(lambda self: PerReplica)\n\n def __init__(self, *value_specs):\n self._value_specs = tuple(value_specs)\n\n def _serialize(self):\n return self._value_specs\n\n @property\n def _component_specs(self):\n return self._value_specs\n\n def _to_components(self, value):\n replica_context = distribute_lib.get_replica_context()\n if replica_context is not None and replica_context.num_replicas_in_sync > 1:\n raise ValueError('Flattening a PerReplica to components is not supported in replica context.')\n return value._values\n\n def _from_components(self, tensor_list):\n return PerReplica(tensor_list)", + "docstring": "Type specification for a .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", + "ast_data": "ClassDef name:PerReplicaSpec Assign Assign Call arguments arg FunctionDef name:__init__ arg:self arguments arg arg Assign Call FunctionDef name:_serialize arg:self arguments arg 
Return return:yes FunctionDef name:_component_specs arg:self arguments arg Return return:yes FunctionDef name:_to_components arg:self arg:value arguments arg arg Assign Call If BoolOp Compare Compare Raise Call Return return:yes FunctionDef name:_from_components arg:self arg:tensor_list arguments arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit_transform", + "source_code": "def fit_transform(self, y):\n y = column_or_1d(y, warn=True)\n self.classes_, y = _unique(y, return_inverse=True)\n return y", + "docstring": "Fit label encoder and return encoded labels. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- y : array-like of shape (n_samples,) Encoded labels.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py", + "ast_data": "FunctionDef name:fit_transform arg:self arg:y arguments arg arg Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_set_dtype_policy", + "source_code": "def _set_dtype_policy(self, dtype):\n if isinstance(dtype, policy.Policy):\n self._dtype_policy = dtype\n elif isinstance(dtype, dict):\n self._dtype_policy = policy.deserialize(dtype)\n elif isinstance(dtype, str) and dtype in ('mixed_float16', 'mixed_bfloat16'):\n self._dtype_policy = policy.Policy(dtype)\n elif dtype:\n self._dtype_policy = policy.Policy(dtypes.as_dtype(dtype).name)\n else:\n self._dtype_policy = policy.global_policy()\n if self._dtype_policy.name == 'mixed_float16' and (not loss_scale_optimizer.strategy_supports_loss_scaling()):\n strategy = distribute_lib.get_strategy()\n raise ValueError('Mixed precision is not supported with the tf.distribute.Strategy: %s. Either stop using mixed precision by removing the use of the \"%s\" policy or use a different Strategy, e.g. a MirroredStrategy.' % (strategy.__class__.__name__, self._dtype_policy.name))\n if self._dtype_policy.compute_dtype:\n self._compute_dtype_object = dtypes.as_dtype(self._dtype_policy.compute_dtype)\n else:\n self._compute_dtype_object = None", + "docstring": "Sets self._dtype_policy.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:_set_dtype_policy arg:self arg:dtype arguments arg arg If Call Assign If Call Assign Call If BoolOp Call Compare Assign Call If Assign Call Call Assign Call If BoolOp Compare Call Assign Call Raise Call If Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "implicit_grad", + "source_code": "def implicit_grad(f):\n\n def grad_fn(*args, **kwds):\n return implicit_val_and_grad(f)(*args, **kwds)[1]\n return grad_fn", + "docstring": "Returns a function which differentiates f with respect to variables. The wrapped function returns the gradient of f when called with the same arguments. The gradient is with respect to all trainable TFE variables accessed by . This function is useful when the exact set of variables to differentiate with is not known ahead of time. Example: Args: f: function to be differentiated. If returns a scalar, this scalar will be differentiated. If returns a tensor or list of tensors, by default a scalar will be computed by adding all their values to produce a single scalar. 
Returns: A function which, when called, returns a list of (gradient, variable) pairs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py", + "ast_data": "FunctionDef name:implicit_grad arg:f arguments arg FunctionDef name:grad_fn arguments arg arg Return return:yes Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "redirect", + "source_code": "@contextmanager\ndef redirect(std: str, to_file: str):\n if std not in _VALID_STD:\n raise ValueError(f'unknown standard stream <{std}>, must be one of {_VALID_STD}')\n c_std = _c_std(std)\n python_std = _python_std(std)\n std_fd = python_std.fileno()\n\n def _redirect(dst):\n libc.fflush(c_std)\n python_std.flush()\n os.dup2(dst.fileno(), std_fd)\n with os.fdopen(os.dup(std_fd)) as orig_std, open(to_file, mode='w+b') as dst:\n _redirect(dst)\n try:\n yield\n finally:\n _redirect(orig_std)", + "docstring": "Redirect `` is assumed to exist and the destination file is overwritten if it already exists. .. note:: Due to buffering cross source writes are not guaranteed to appear in wall-clock order. For instance in the example below it is possible for the C-outputs to appear before the python outputs in the log file. Usage: :: # syntactic-sugar for redirect(\"stdout\", \"tmp/stdout.log\") with redirect_stdout(\"/tmp/stdout.log\"): print(\"python stdouts are redirected\") libc = ctypes.CDLL(\"libc.so.6\") libc.printf(b\"c stdouts are also redirected\" os.system(\"echo system stdouts are also redirected\") print(\"stdout restored\")", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\redirects.py", + "ast_data": "FunctionDef name:redirect arg:std arg:to_file arguments arg arg If Compare Raise Call Assign Call Assign Call Assign Call FunctionDef name:_redirect arg:dst arguments arg Call Call Call Call With Call Call Call Call Try Call" + }, + { + "library": "scrapy", + "name": "store", + "source_code": "def store(file):\n pass", + "docstring": "Store the given file stream", + "type": "method", + "file_path": "scrapy\\scrapy\\extensions\\feedexport.py", + "ast_data": "FunctionDef name:store arg:file arguments arg" + }, + { + "library": "tensorflow", + "name": "read_file_to_string", + "source_code": "def read_file_to_string(filename, binary_mode=False):\n if binary_mode:\n f = FileIO(filename, mode='rb')\n else:\n f = FileIO(filename, mode='r')\n return f.read()", + "docstring": "Reads the entire contents of a file to a string. Args: filename: string, path to a file binary_mode: whether to open the file in binary mode or not. This changes the type of the object returned. Returns: contents of the file as a string or bytes. Raises: errors.OpError: Raises variety of errors that are subtypes e.g. 
etc.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", + "ast_data": "FunctionDef name:read_file_to_string arg:filename arg:binary_mode arguments arg arg If Assign Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "form_valid", + "source_code": "def form_valid(self, form):\n return HttpResponseRedirect(self.get_success_url())", + "docstring": "If the form is valid, redirect to the supplied URL.", + "type": "method", + "file_path": "django\\django\\views\\generic\\edit.py", + "ast_data": "FunctionDef name:form_valid arg:self arg:form arguments arg arg Return return:yes Call Call" + }, + { + "library": "scrapy", + "name": "_has_ajax_crawlable_variant", + "source_code": "def _has_ajax_crawlable_variant(self, response: Response) -> bool:\n body = response.text[:self.lookup_bytes]\n return _has_ajaxcrawlable_meta(body)", + "docstring": "Return True if a page without hash fragment could be \"AJAX crawlable\".", + "type": "method", + "file_path": "scrapy\\scrapy\\downloadermiddlewares\\ajaxcrawl.py", + "ast_data": "FunctionDef name:_has_ajax_crawlable_variant arg:self arg:response arguments arg arg Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "ConstantInputWarning", + "source_code": "class ConstantInputWarning(DegenerateDataWarning):\n\n def __init__(self, msg=None):\n if msg is None:\n msg = 'All values in data are exactly equal; results may not be reliable.'\n self.args = (msg,)", + "docstring": "Warns when all values in data are exactly equal.", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_warnings_errors.py", + "ast_data": "ClassDef name:ConstantInputWarning FunctionDef name:__init__ arg:self arg:msg arguments arg arg If Compare Assign Assign" + }, + { + "library": "pytorch", + "name": "strip_local_scope", + "source_code": "def strip_local_scope(s: str) -> str:\n import re\n pattern = 'L\\\\[\\\\s*[\\'\\\\\"](.*?)[\\'\\\\\"]\\\\s*\\\\]'\n return re.sub(pattern, '\\\\1', s)", + "docstring": "Replace occurrences of L[...] with just the inner content. Handles both single and double quotes. This is to generate user friendly recompilation messages.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\guards.py", + "ast_data": "FunctionDef name:strip_local_scope arg:s arguments arg Assign Return return:yes Call" + }, + { + "library": "numpy", + "name": "polypow", + "source_code": "def polypow(c, pow, maxpower=None):\n return pu._pow(np.convolve, c, pow, maxpower)", + "docstring": "Raise a polynomial to a power. Returns the polynomial raised to the power . The argument is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series `` Parameters ---------- c : array_like 1-D array of array of series coefficients ordered from low to high degree. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Power series of power. 
See Also -------- polyadd, polysub, polymulx, polymul, polydiv Examples -------- >>> from numpy.polynomial import polynomial as P >>> P.polypow([1, 2, 3], 2) array([ 1., 4., 10., 12., 9.])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\polynomial.py", + "ast_data": "FunctionDef name:polypow arg:c arg:pow arg:maxpower arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "from_reference", + "source_code": "@staticmethod\ndef from_reference(cls, ref_qconvt, output_scale, output_zero_point):\n qconv = cls(ref_qconvt.in_channels, ref_qconvt.out_channels, ref_qconvt.kernel_size, ref_qconvt.stride, ref_qconvt.padding, ref_qconvt.output_padding, ref_qconvt.groups, ref_qconvt.bias is not None, ref_qconvt.dilation, ref_qconvt.padding_mode, device=ref_qconvt.weight.device, dtype=ref_qconvt.weight.dtype)\n qweight = ref_qconvt.get_quantized_weight()\n qconv.set_weight_bias(qweight, ref_qconvt.bias)\n qconv.scale = float(output_scale)\n qconv.zero_point = int(output_zero_point)\n return qconv", + "docstring": "Create a (fbgemm/qnnpack) quantized module from a reference quantized module Args: ref_qconvt (Module): a reference quantized module, either produced by torch.ao.quantization utilities or provided by the user output_scale (float): scale for output Tensor output_zero_point (int): zero point for output Tensor", + "type": "method", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\conv.py", + "ast_data": "FunctionDef name:from_reference arg:cls arg:ref_qconvt arg:output_scale arg:output_zero_point arguments arg arg arg arg Assign Call Compare Assign Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "range", + "source_code": "def range(self, name='range'):\n with self._name_scope(name):\n return self.high - self.low", + "docstring": ".", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\uniform.py", + "ast_data": "FunctionDef name:range arg:self arg:name arguments arg arg With Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self, path, query_string=''):\n self.request = cherrypy.serving.request\n self.query_string = query_string\n if '?' in path:\n path, self.query_string = path.split('?', 1)\n path = urllib.parse.urljoin(self.request.path_info, path)\n self.path = path\n CherryPyException.__init__(self, path, self.query_string)", + "docstring": "Initialize the internal redirect exception.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cperror.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:path arg:query_string arguments arg arg arg Assign Assign If Compare Assign Call Assign Call Assign Call" + }, + { + "library": "scipy", + "name": "BiggsExp02", + "source_code": "class BiggsExp02(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([0] * 2, [20] * 2))\n self.global_optimum = [[1.0, 10.0]]\n self.fglob = 0\n\n def fun(self, x, *args):\n self.nfev += 1\n t = arange(1, 11.0) * 0.1\n y = exp(-t) - 5 * exp(-10 * t)\n vec = (exp(-t * x[0]) - 5 * exp(-t * x[1]) - y) ** 2\n return sum(vec)", + "docstring": "BiggsExp02 objective function. The BiggsExp02 [1]_ global optimization problem is a multimodal minimization problem defined as follows .. 
math:: \\begin{matrix} f_{\\text{BiggsExp02}}(x) = \\sum_{i=1}^{10} (e^{-t_i x_1} - 5 e^{-t_i x_2} - y_i)^2 \\\\ t_i = 0.1 i\\\\ y_i = e^{-t_i} - 5 e^{-10t_i}\\\\ \\end{matrix} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py", + "ast_data": "ClassDef name:BiggsExp02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "scrapy", + "name": "SequenceExclude", + "source_code": "class SequenceExclude:\n\n def __init__(self, seq: Sequence[Any]):\n self.seq: Sequence[Any] = seq\n\n def __contains__(self, item: Any) -> bool:\n return item not in self.seq", + "docstring": "Object to test if an item is NOT within some sequence.", + "type": "class", + "file_path": "scrapy\\scrapy\\utils\\datatypes.py", + "ast_data": "ClassDef name:SequenceExclude FunctionDef name:__init__ arg:self arg:seq arguments arg arg FunctionDef name:__contains__ arg:self arg:item arguments arg arg Return return:yes Compare" + }, + { + "library": "kornia", + "name": "equalize", + "source_code": "def equalize(probability: float, _: int) -> OperationBase:\n return Equalize(probability)", + "docstring": "Return equalize op.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\auto\\autoaugment\\ops.py", + "ast_data": "FunctionDef name:equalize arg:probability arg:_ arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "AlterIndexTogether", + "source_code": "class AlterIndexTogether(AlterTogetherOptionOperation):\n option_name = 'index_together'\n\n def __init__(self, name, index_together):\n super().__init__(name, index_together)", + "docstring": "Change the value of index_together to the target one. Input value of index_together must be a set of tuples.", + "type": "class", + "file_path": "django\\django\\db\\migrations\\operations\\models.py", + "ast_data": "ClassDef name:AlterIndexTogether Assign FunctionDef name:__init__ arg:self arg:name arg:index_together arguments arg arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "_convert_to_numpy_obj", + "source_code": "def _convert_to_numpy_obj(numpy_dtype, obj):\n return numpy_dtype(np.array(obj).astype(numpy_dtype)) if numpy_dtype is not object else str(obj)", + "docstring": "Explicitly convert obj based on numpy type except for string type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", + "ast_data": "FunctionDef name:_convert_to_numpy_obj arg:numpy_dtype arg:obj arguments arg arg Return return:yes Compare Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "excluded_from_module_rename", + "source_code": "def excluded_from_module_rename(module, import_rename_spec):\n for excluded_prefix in import_rename_spec.excluded_prefixes:\n if module.startswith(excluded_prefix):\n return True\n return False", + "docstring": "Check if this module import should not be renamed. Args: module: (string) module name. import_rename_spec: ImportRename instance. 
Returns: True if this import should not be renamed according to the import_rename_spec.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py", + "ast_data": "FunctionDef name:excluded_from_module_rename arg:module arg:import_rename_spec arguments arg arg For If Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "run_op_benchmark", + "source_code": "def run_op_benchmark(self, op, iters=1, warmup=True, session_config=None):\n if context.executing_eagerly():\n return self._run_eager_benchmark(iterable=op, iters=iters, warmup=warmup)\n return self._run_graph_benchmark(iterable=op, iters=iters, warmup=warmup, session_config=session_config)", + "docstring": "Benchmarks the op. Runs the op times. In each iteration, the benchmark measures the time it takes to go execute the op. Args: op: The tf op to benchmark. iters: Number of times to repeat the timing. warmup: If true, warms up the session caches by running an untimed run. session_config: A ConfigProto protocol buffer with configuration options for the session. Applicable only for benchmarking in graph mode. Returns: A float, representing the per-execution wall time of the op in seconds. This is the median time (with respect to ) it takes for the op to be executed num of times.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\benchmark_base.py", + "ast_data": "FunctionDef name:run_op_benchmark arg:self arg:op arg:iters arg:warmup arg:session_config arguments arg arg arg arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "compute_workload", + "source_code": "def compute_workload(self):\n return None", + "docstring": "return the number of scalar operations it takes to finish the tensor op", + "type": "method", + "file_path": "pytorch\\benchmarks\\tensorexpr\\benchmark.py", + "ast_data": "FunctionDef name:compute_workload arg:self arguments arg Return return:no" + }, + { + "library": "pandas", + "name": "_maybe_cast_indexer", + "source_code": "def _maybe_cast_indexer(self, key):\n return key", + "docstring": "If we have a float key and are not a floating index, then try to cast to an int if equivalent.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_maybe_cast_indexer arg:self arg:key arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "as_bytes", + "source_code": "def as_bytes(bytes_or_text, encoding='utf-8'):\n encoding = codecs.lookup(encoding).name\n if isinstance(bytes_or_text, bytearray):\n return bytes(bytes_or_text)\n elif isinstance(bytes_or_text, str):\n return bytes_or_text.encode(encoding)\n elif isinstance(bytes_or_text, bytes):\n return bytes_or_text\n else:\n raise TypeError('Expected binary or unicode string, got %r' % (bytes_or_text,))", + "docstring": "Converts , , or unicode python input types to . Uses utf-8 encoding for text by default. Args: bytes_or_text: A , , , or object. encoding: A string indicating the charset for encoding unicode. Returns: A object. 
Raises: TypeError: If is not a binary or unicode string.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\compat.py", + "ast_data": "FunctionDef name:as_bytes arg:bytes_or_text arg:encoding arguments arg arg Assign Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "_convert_actual", + "source_code": "def _convert_actual(entity, program_ctx):\n if not hasattr(entity, '__code__'):\n raise ValueError(\"Cannot apply autograph to a function that doesn't expose a __code__ object. If this is a @tf.function, try passing f.python_function instead.\")\n transformed, module, source_map = _TRANSPILER.transform(entity, program_ctx)\n assert not hasattr(transformed, 'ag_module')\n assert not hasattr(transformed, 'ag_source_map')\n transformed.ag_module = module\n transformed.ag_source_map = source_map\n return transformed", + "docstring": "Applies AutoGraph to entity.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py", + "ast_data": "FunctionDef name:_convert_actual arg:entity arg:program_ctx arguments arg arg If Call Raise Call Assign Call Call Call Assign Assign Return return:yes" + }, + { + "library": "scipy", + "name": "Price04", + "source_code": "class Price04(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-50.0] * self.N, [50.0] * self.N))\n self.custom_bounds = ([0, 2], [0, 2])\n self.global_optimum = [[2.0, 4.0]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n return (2.0 * x[1] * x[0] ** 3.0 - x[1] ** 3.0) ** 2.0 + (6.0 * x[0] - x[1] ** 2.0 + x[1]) ** 2.0", + "docstring": "Price 4 objective function. This class defines the Price 4 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Price04}}(x) = (2 x_1^3 x_2 - x_2^3)^2 + (6 x_1 - x_2^2 + x_2)^2 with :math: for :math:. *Global optimum*: :math: for :math:, :math: and :math: .. [1] Price, W. A controlled random search procedure for global optimisation Computer Journal, 1977, 20, 367-370", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py", + "ast_data": "ClassDef name:Price04 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "variable_sync_on_read_context", + "source_code": "@tf_export('__internal__.distribute.variable_sync_on_read_context', v1=[])\n@contextlib.contextmanager\ndef variable_sync_on_read_context():\n try:\n _variable_sync_on_read_context.entered = True\n yield\n finally:\n _variable_sync_on_read_context.entered = False", + "docstring": "A context that forces SyncOnReadVariable to aggregate upon reading. This context is useful if one wants to read the aggregated value out of a SyncOnReadVariable in replica context. By default the aggregation is turned off per the definition of SyncOnReadVariable. When reading a SyncOnReadVariable in cross-replica context, aggregation is always turned on so there is no need for such context. By reading a SyncOnReadVariable, we mean: 1. Convert the variable to a tensor using . 2. Calling or . 
Example usage: Yields: Context manager for aggregating SyncOnReadVariable upon reading.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:variable_sync_on_read_context arguments Try Assign Assign Call" + }, + { + "library": "pytorch", + "name": "divide_kwargs", + "source_code": "@staticmethod\ndef divide_kwargs(kwargs):\n from torch.utils.checkpoint import checkpoint\n ckpt_signature = inspect.signature(checkpoint)\n checkpoint_keys = set()\n for name in ckpt_signature.parameters:\n if name in ('function', 'args', 'kwargs'):\n continue\n checkpoint_keys.add(name)\n checkpoint_keys.add('preserve_rng_state')\n checkpoint_kwargs = {name: kwargs[name] for name in kwargs.keys() if name in checkpoint_keys}\n gmod_kwargs = {name: kwargs[name] for name in kwargs.keys() if name not in checkpoint_keys}\n return (checkpoint_kwargs, gmod_kwargs)", + "docstring": "checkpoint fn can have mixed kwargs between checkpointed fn and checkpoint fn itself. For example >> def gn(x, y, z=None): >> a = torch.matmul(x, y) >> if z is not None: >> return torch.matmul(a, z) >> return a >> def fn(x, y, z): >> return torch.cos(checkpoint(gn, x, y, use_reentrant=False, z=z)) In the above case, z belongs to checkpointed function gn, but use_reentrant belongs to the checkpoint function. This function splits the kwargs into checkpoint_kwargs and gmod_kwargs (or checkpointed_fn_kwargs). We do sorting to ensure same graph from run to run for better debuggability. It is not required for correctness.", + "type": "method", + "file_path": "pytorch\\torch\\_higher_order_ops\\wrap.py", + "ast_data": "FunctionDef name:divide_kwargs arg:kwargs arguments arg Assign Call Assign Call For If Compare Call Call Assign Call Compare Assign Call Compare Return return:yes" + }, + { + "library": "scipy", + "name": "hadamard", + "source_code": "def hadamard(n, dtype=int):\n if n < 1:\n lg2 = 0\n else:\n lg2 = int(math.log(n, 2))\n if 2 ** lg2 != n:\n raise ValueError('n must be an positive integer, and n must be a power of 2')\n H = np.array([[1]], dtype=dtype)\n for i in range(0, lg2):\n H = np.vstack((np.hstack((H, H)), np.hstack((H, -H))))\n return H", + "docstring": "Construct an Hadamard matrix. Constructs an n-by-n Hadamard matrix, using Sylvester's construction. must be a power of 2. Parameters ---------- n : int The order of the matrix. must be a power of 2. dtype : dtype, optional The data type of the array to be constructed. Returns ------- H : (n, n) ndarray The Hadamard matrix. Notes ----- .. versionadded:: 0.8.0 Examples -------- >>> from scipy.linalg import hadamard >>> hadamard(2, dtype=complex) array([[ 1.+0.j, 1.+0.j], [ 1.+0.j, -1.-0.j]]) >>> hadamard(4) array([[ 1, 1, 1, 1], [ 1, -1, 1, -1], [ 1, 1, -1, -1], [ 1, -1, -1, 1]])", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_special_matrices.py", + "ast_data": "FunctionDef name:hadamard arg:n arg:dtype arguments arg arg If Compare Assign Assign Call Call If Compare Raise Call Assign Call For Call Assign Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "ravel", + "source_code": "def ravel(self, order: Literal['C', 'F', 'A', 'K'] | None='C') -> Self:\n return self", + "docstring": "Return a flattened view on this array. Parameters ---------- order : {None, 'C', 'F', 'A', 'K'}, default 'C' Returns ------- ExtensionArray A flattened view on the array. See Also -------- ExtensionArray.tolist: Return a list of the values. 
Notes ----- - Because ExtensionArrays are 1D-only, this is a no-op. - The \"order\" argument is ignored, is for compatibility with NumPy. Examples -------- >>> pd.array([1, 2, 3]).ravel() [1, 2, 3] Length: 3, dtype: Int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:ravel arg:self arg:order arguments arg arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "_validate_converted_limits", + "source_code": "def _validate_converted_limits(self, limit, convert):\n if limit is not None:\n converted_limit = convert(limit)\n if isinstance(converted_limit, np.ndarray):\n converted_limit = converted_limit.squeeze()\n if isinstance(converted_limit, Real) and (not np.isfinite(converted_limit)):\n raise ValueError('Axis limits cannot be NaN or Inf')\n return converted_limit", + "docstring": "Raise ValueError if converted limits are non-finite. Note that this function also accepts None as a limit argument. Returns ------- The limit value after call to convert(), or None if limit is None.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:_validate_converted_limits arg:self arg:limit arg:convert arguments arg arg arg If Compare Assign Call If Call Assign Call If BoolOp Call Call Raise Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_generate_charstrings", + "source_code": "def _generate_charstrings(font):\n go = font.getGlyphOrder()\n s = f'/CharStrings {len(go)} dict dup begin\\n'\n for i, name in enumerate(go):\n s += f'/{name} {i} def\\n'\n s += 'end readonly def'\n return s", + "docstring": "Transform font glyphs into CharStrings Helper function for _font_to_ps_type42. Parameters ---------- font : fontTools.ttLib.ttFont.TTFont The font Returns ------- str A definition of the CharStrings dictionary in PostScript", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py", + "ast_data": "FunctionDef name:_generate_charstrings arg:font arguments arg Assign Call Assign Call For Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "avg_pool1d", + "source_code": "@tf_export('nn.avg_pool1d')\n@dispatch.add_dispatch_support\ndef avg_pool1d(input, ksize, strides, padding, data_format='NWC', name=None):\n with ops.name_scope(name, 'AvgPool1D', [input]) as name:\n if data_format is None:\n data_format = 'NWC'\n channel_index = 1 if data_format.startswith('NC') else 2\n ksize = [1] + _get_sequence(ksize, 1, channel_index, 'ksize')\n strides = [1] + _get_sequence(strides, 1, channel_index, 'strides')\n expanding_dim = 1 if data_format == 'NWC' else 2\n data_format = 'NHWC' if data_format == 'NWC' else 'NCHW'\n input = array_ops.expand_dims_v2(input, expanding_dim)\n result = gen_nn_ops.avg_pool(input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)\n return array_ops.squeeze(result, expanding_dim)", + "docstring": "Performs the average pooling on the input. Each entry in is the mean of the corresponding size window in . Note internally this op reshapes and uses the underlying 2d operation. Args: input: A 3-D of the format specified by . ksize: An int or list of that has length or . The size of the window for each dimension of the input tensor. strides: An int or list of that has length or . The stride of the sliding window for each dimension of the input tensor. padding: A string, either or . The padding algorithm. See [here]( for more information. 
data_format: An optional string from: \"NWC\", \"NCW\". Defaults to \"NWC\". name: A name for the operation (optional). Returns: A of format specified by . The max pooled output tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:avg_pool1d arg:input arg:ksize arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg With Call If Compare Assign Assign Call Assign Call Assign Call Assign Compare Assign Compare Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "cryptography", + "name": "public_key", + "source_code": "@abc.abstractmethod\ndef public_key(self) -> EllipticCurvePublicKey:\n pass", + "docstring": "The EllipticCurvePublicKey for this private key.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py", + "ast_data": "FunctionDef name:public_key arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "dense_shape_and_type", + "source_code": "def dense_shape_and_type(matrix):\n if not isinstance(matrix, tensor_lib.Tensor):\n raise TypeError('matrix should be a tensor, but saw: %s' % (matrix,))\n if matrix.dtype != dtypes.variant:\n raise TypeError('expected matrix to be type tf.variant, but saw: %s' % (matrix.dtype,))\n handle_data = _get_handle_data(matrix)\n if not handle_data or not handle_data.is_set:\n raise ValueError('matrix has missing handle data: %s' % (matrix,))\n if len(handle_data.shape_and_type) != 1:\n raise ValueError(\"len(matrix.handle_data.shape_and_type) != 1: '%s'\" % (handle_data.shape_and_type,))\n return DenseShapeAndType(tensor_shape.TensorShape(handle_data.shape_and_type[0].shape), dtypes.DType(handle_data.shape_and_type[0].dtype))", + "docstring": "Get dense shape and dtype of the tf.Tensor containing the matrix. Args: matrix: A of type storing a sparse matrix. Returns: An instance of with properties (a ) and (a ). Raises: TypeError: if is not a tensor or its dtype is not variant. ValueError: if lacks static handle data containing the dense shape and dtype.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_ops.py", + "ast_data": "FunctionDef name:dense_shape_and_type arg:matrix arguments arg If Call Raise Call If Compare Raise Call Assign Call If BoolOp Raise Call If Compare Call Raise Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, object_local_name: str, from_shard_layouts: Sequence[sparse_core_layout_pb2.SparseCoreTableLayout], to_shard_layouts: Sequence[sparse_core_layout_pb2.SparseCoreTableLayout]):\n logging.info('Creating EmbeddingReshardCallback for %s', object_local_name)\n self._object_local_name = object_local_name\n self._from_shard_layouts = from_shard_layouts\n self._to_shard_layouts = to_shard_layouts", + "docstring": "Initializes Reshard callback. Args: object_local_name: The local name of the object being restored. from_shard_layouts: layouts as in checkpoint being restored from. 
to_shard_layouts: target layouts as specified in the embedding being restored.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:object_local_name arg:from_shard_layouts arg:to_shard_layouts arguments arg arg arg arg Call Assign Assign Assign" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, patch_func=None, **kwargs):\n super().__init__(**kwargs)\n self._patch_func = patch_func", + "docstring": "Parameters ---------- patch_func : callable, optional The function that creates the legend key artist. *patch_func* should have the signature:: def patch_func(legend=legend, orig_handle=orig_handle, xdescent=xdescent, ydescent=ydescent, width=width, height=height, fontsize=fontsize) Subsequently, the created artist will have its `.HandlerBase`.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:patch_func arguments arg arg arg Call Call Assign" + }, + { + "library": "tensorflow", + "name": "_AddShardedRestoreOps", + "source_code": "def _AddShardedRestoreOps(self, filename_tensor, per_device, restore_sequentially, reshape):\n sharded_restores = []\n for shard, (device, saveables) in enumerate(per_device):\n with ops.device(device):\n sharded_restores.append(self._AddRestoreOps(filename_tensor, saveables, restore_sequentially, reshape, preferred_shard=shard, name='restore_shard'))\n return control_flow_ops.group(*sharded_restores, name='restore_all')", + "docstring": "Add Ops to restore variables from multiple devices. Args: filename_tensor: Tensor for the path of the file to load. per_device: A list of (device, SaveableObject) pairs, as returned by _GroupByDevices(). restore_sequentially: True if we want to restore variables sequentially within a shard. reshape: True if we want to reshape loaded tensors to the shape of the corresponding variable. 
Returns: An Operation that restores the variables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", + "ast_data": "FunctionDef name:_AddShardedRestoreOps arg:self arg:filename_tensor arg:per_device arg:restore_sequentially arg:reshape arguments arg arg arg arg arg Assign For Call With Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n return self._delayed_rewrite_functions.forward().name", + "docstring": "name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "__len__", + "source_code": "def __len__(self):\n return self.geom_count", + "docstring": "Return the number of interior rings in this Polygon.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes" + }, + { + "library": "scrapy", + "name": "verify_url_scheme", + "source_code": "def verify_url_scheme(url: str) -> str:\n parsed = urlparse(url)\n if parsed.scheme == '' and parsed.netloc == '':\n parsed = urlparse('//' + url)._replace(scheme='https')\n return parsed.geturl()", + "docstring": "Check url for scheme and insert https if none found.", + "type": "function", + "file_path": "scrapy\\scrapy\\commands\\genspider.py", + "ast_data": "FunctionDef name:verify_url_scheme arg:url arguments arg Assign Call If BoolOp Compare Compare Assign Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_slope", + "source_code": "def get_slope(self):\n return self._slope", + "docstring": "Return the *slope* value of the line.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\lines.py", + "ast_data": "FunctionDef name:get_slope arg:self arguments arg Return return:yes" + }, + { + "library": "sphinx", + "name": "PyClassMethod", + "source_code": "class PyClassMethod(PyMethod):\n option_spec: ClassVar[OptionSpec] = PyObject.option_spec.copy()\n\n def run(self) -> list[Node]:\n self.name = 'py:method'\n self.options['classmethod'] = True\n return super().run()", + "docstring": "Description of a classmethod.", + "type": "class", + "file_path": "sphinx\\sphinx\\domains\\python\\__init__.py", + "ast_data": "ClassDef name:PyClassMethod Call FunctionDef name:run arg:self arguments arg Assign Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "GraphWeakTensor", + "source_code": "class GraphWeakTensor(core.Symbol, WeakTensor):\n __name__ = 'tf.GraphWeakTensor'", + "docstring": "A weakly typed Graph Tensor.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\weak_tensor.py", + "ast_data": "ClassDef name:GraphWeakTensor Assign" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X):\n check_is_fitted(self)\n if self.n_neighbors is not None:\n distances, indices = self.nbrs_.kneighbors(X, return_distance=True)\n else:\n distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True)\n n_samples_fit = self.nbrs_.n_samples_fit_\n n_queries = distances.shape[0]\n if hasattr(X, 'dtype') and X.dtype == np.float32:\n dtype = np.float32\n else:\n dtype = np.float64\n G_X = np.zeros((n_queries, n_samples_fit), dtype)\n for i in range(n_queries):\n G_X[i] = 
np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0)\n G_X **= 2\n G_X *= -0.5\n return self.kernel_pca_.transform(G_X)", + "docstring": "Transform X. This is implemented by linking the points X into the graph of geodesic distances of the training data. First the nearest neighbors of X are found in the training data, and from these the shortest geodesic distances from each point in X to each point in the training data are computed in order to construct the kernel. The embedding of X is the projection of this kernel onto the embedding vectors of the training set. Parameters ---------- X : {array-like, sparse matrix}, shape (n_queries, n_features) If neighbors_algorithm='precomputed', X is assumed to be a distance matrix or a sparse graph of shape (n_queries, n_samples_fit). Returns ------- X_new : array-like, shape (n_queries, n_components) X transformed in the new space.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\manifold\\_isomap.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call If Compare Assign Call Assign Call Assign Assign If BoolOp Call Compare Assign Assign Assign Call For Call Assign Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "get_full_qualified_name", + "source_code": "def get_full_qualified_name(self, node: Element) -> str | None:\n pass", + "docstring": "Return full qualified name for given node.", + "type": "method", + "file_path": "sphinx\\sphinx\\domains\\__init__.py", + "ast_data": "FunctionDef name:get_full_qualified_name arg:self arg:node arguments arg arg" + }, + { + "library": "scikit-learn", + "name": "diag", + "source_code": "def diag(self, X):\n return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2", + "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y). Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X).", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:diag arg:self arg:X arguments arg arg Return return:yes Call" + }, + { + "library": "scrapy", + "name": "disconnect", + "source_code": "def disconnect(self, receiver: Any, signal: Any, **kwargs: Any) -> None:\n kwargs.setdefault('sender', self.sender)\n dispatcher.disconnect(receiver, signal, **kwargs)", + "docstring": "Disconnect a receiver function from a signal. This has the opposite effect of the :meth: method, and the arguments are the same.", + "type": "method", + "file_path": "scrapy\\scrapy\\signalmanager.py", + "ast_data": "FunctionDef name:disconnect arg:self arg:receiver arg:signal arguments arg arg arg arg Call Call" + }, + { + "library": "pytorch", + "name": "_rename_without_collisions", + "source_code": "def _rename_without_collisions(name_map: dict[str, str], orig_name: str, name: str, is_placeholder: bool=False):\n if name in name_map.values():\n match = re.match('(.*)_(\\\\d+)', name)\n if match and (not is_placeholder):\n name, n = (match.group(1), int(match.group(2)))\n else:\n n = 0\n while (dup_name := f'{name}_{n + 1}') in name_map.values():\n n += 1\n name_map[orig_name] = dup_name\n else:\n name_map[orig_name] = name\n return name_map[orig_name]", + "docstring": "Renames nodes to avoid name collisions, with suffixing. 
name_map: map from original name to new name orig_name: mapping key name: candidate name (potentially suffixed, e.g. mul_2) is_placeholder: if the node is a placeholder, avoid detecting suffix", + "type": "function", + "file_path": "pytorch\\torch\\_export\\utils.py", + "ast_data": "FunctionDef name:_rename_without_collisions arg:name_map arg:orig_name arg:name arg:is_placeholder arguments arg arg arg arg If Compare Call Assign Call If BoolOp Assign Call Call Call Assign While Compare Call Assign Assign Return return:yes" + }, + { + "library": "kornia", + "name": "__call__", + "source_code": "def __call__(self, images: Union[Tensor, list[Tensor]]) -> Union[Tensor, list[Tensor]]:\n if isinstance(images, (list, tuple)):\n results = [super(DepthEstimation, self).__call__(image[None].cpu().numpy())[0] for image in images]\n results = [self.resize_back(tensor(result, device=image.device, dtype=image.dtype), image) for result, image in zip(results, images)]\n return results\n result = super().__call__(images.cpu().numpy())[0]\n result = tensor(result, device=images.device, dtype=images.dtype)\n return self.resize_back(result, images)", + "docstring": "Detect objects in a given list of images. Args: images: If list of RGB images. Each image is a Tensor with shape :math:. If Tensor, a Tensor with shape :math:. Returns: list of detections found in each image. For item in a batch, shape is :math:, where :math: is the number of detections in the given image, :math: represents class id, score, and bounding box.", + "type": "method", + "file_path": "kornia\\kornia\\models\\depth_estimation\\base.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:images arguments arg arg If Call Assign Call Call Call Call Assign Call Call Call Return return:yes Assign Call Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "respond", + "source_code": "def respond(body, encoding='utf-8', allow_none=0):\n if not isinstance(body, XMLRPCFault):\n body = (body,)\n _set_response(xmlrpc_dumps(body, methodresponse=1, encoding=encoding, allow_none=allow_none))", + "docstring": "Construct HTTP response body.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\xmlrpcutil.py", + "ast_data": "FunctionDef name:respond arg:body arg:encoding arg:allow_none arguments arg arg arg If Call Assign Call Call" + }, + { + "library": "django", + "name": "__deepcopy__", + "source_code": "def __deepcopy__(self, memodict):\n return self.clone()", + "docstring": "The routine is used by the class of django.utils.tree; thus, the protocol routine needs to be implemented to return correct copies (clones) of these GEOS objects, which use C pointers.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:__deepcopy__ arg:self arg:memodict arguments arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "cython_operation", + "source_code": "@final\ndef cython_operation(self, *, values: ArrayLike, axis: AxisInt, min_count: int=-1, comp_ids: np.ndarray, ngroups: int, **kwargs) -> ArrayLike:\n self._validate_axis(axis, values)\n if not isinstance(values, np.ndarray):\n return values._groupby_op(how=self.how, has_dropped_na=self.has_dropped_na, min_count=min_count, ngroups=ngroups, ids=comp_ids, **kwargs)\n return self._cython_op_ndim_compat(values, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, mask=None, **kwargs)", + "docstring": "Call our cython function, with appropriate pre- and post- 
processing.", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\ops.py", + "ast_data": "FunctionDef name:cython_operation arg:self arguments arg arg arg arg arg arg arg Call If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_finish_y", + "source_code": "def _finish_y(self, y, x_shape):\n y = y.reshape(x_shape + self._y_extra_shape)\n if self._y_axis != 0 and x_shape != ():\n nx = len(x_shape)\n ny = len(self._y_extra_shape)\n s = list(range(nx, nx + self._y_axis)) + list(range(nx)) + list(range(nx + self._y_axis, nx + ny))\n y = y.transpose(s)\n return y", + "docstring": "Reshape interpolated y back to an N-D array similar to initial y", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_polyint.py", + "ast_data": "FunctionDef name:_finish_y arg:self arg:y arg:x_shape arguments arg arg arg Assign Call If BoolOp Compare Compare Assign Call Assign Call Assign Call Call Call Call Call Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "valid_reason", + "source_code": "@property\ndef valid_reason(self):\n return capi.geos_isvalidreason(self.ptr).decode()", + "docstring": "Return a string containing the reason for any invalidity.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:valid_reason arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_replace_dropout", + "source_code": "def _replace_dropout(m: torch.fx.GraphModule, train_to_eval: bool):\n from .utils import _get_aten_graph_module_for_pattern\n m.graph.eliminate_dead_code()\n m.recompile()\n for inplace in [False, True]:\n\n def dropout_train(x):\n return F.dropout(x, p=0.5, training=True, inplace=inplace)\n\n def dropout_eval(x):\n return F.dropout(x, p=0.5, training=False, inplace=inplace)\n example_inputs = (torch.randn(1),)\n if train_to_eval:\n match_pattern = _get_aten_graph_module_for_pattern(_WrapperModule(dropout_train), example_inputs)\n replacement_pattern = _get_aten_graph_module_for_pattern(_WrapperModule(dropout_eval), example_inputs)\n else:\n match_pattern = _get_aten_graph_module_for_pattern(_WrapperModule(dropout_eval), example_inputs)\n replacement_pattern = _get_aten_graph_module_for_pattern(_WrapperModule(dropout_train), example_inputs)\n from torch.fx.subgraph_rewriter import replace_pattern_with_filters\n replace_pattern_with_filters(m, match_pattern, replacement_pattern, match_filters=[], ignore_literals=True)\n m.recompile()", + "docstring": "Switch dropout patterns in the model between train and eval modes. Dropout has different behavior in train vs eval mode. For exported models, however, calling or does not automatically switch the dropout behavior between the two modes, so here we need to rewrite the aten dropout patterns manually to achieve the same effect. 
See", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\export_utils.py", + "ast_data": "FunctionDef name:_replace_dropout arg:m arg:train_to_eval arguments arg arg Call Call For FunctionDef name:dropout_train arg:x arguments arg Return return:yes Call FunctionDef name:dropout_eval arg:x arguments arg Return return:yes Call Assign Call If Assign Call Call Assign Call Call Assign Call Call Assign Call Call Call Call" + }, + { + "library": "pytorch", + "name": "accuracy", + "source_code": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", + "docstring": "Computes the precision@k for the specified values of k", + "type": "function", + "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py", + "ast_data": "FunctionDef name:accuracy arg:output arg:target arg:topk arguments arg arg arg If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Assign For Assign Call Call Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_device_capability", + "source_code": "def get_device_capability(device: Optional[_device_t]=None) -> tuple[int, int]:\n return torch._C._mtia_getDeviceCapability(_get_device_index(device, optional=True))", + "docstring": "Return capability of a given device as a tuple of (major version, minor version). Args: device (torch.device or int, optional) selected device. Returns statistics for the current device, given by current_device(), if device is None (default).", + "type": "function", + "file_path": "pytorch\\torch\\mtia\\__init__.py", + "ast_data": "FunctionDef name:get_device_capability arg:device arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_RecordLastCheckpoint", + "source_code": "def _RecordLastCheckpoint(self, latest_save_path):\n if not self.saver_def.max_to_keep:\n return\n for p in self._last_checkpoints[:]:\n if latest_save_path == self._CheckpointFilename(p):\n self._last_checkpoints.remove(p)\n self._last_checkpoints.append((latest_save_path, time.time()))\n if len(self._last_checkpoints) > self.saver_def.max_to_keep:\n self._checkpoints_to_be_deleted.append(self._last_checkpoints.pop(0))", + "docstring": "Manages the list of the latest checkpoints.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", + "ast_data": "FunctionDef name:_RecordLastCheckpoint arg:self arg:latest_save_path arguments arg arg If Return return:no For If Compare Call Call Call Call If Compare Call Call Call" + }, + { + "library": "tensorflow", + "name": "sanitize_spec_name", + "source_code": "def sanitize_spec_name(name: str) -> str:\n if not name:\n return 'unknown'\n swapped = ''.join([c if c.isalnum() else '_' for c in name.lower()])\n if swapped[0].isalpha():\n return swapped\n else:\n return 'tensor_' + swapped", + "docstring": "Sanitizes Spec names. Matches Graph Node and Python naming conventions. Without sanitization, names that are not legal Python parameter names can be set which makes it challenging to represent callables supporting the named calling capability. 
Args: name: The name to sanitize. Returns: A string that meets Python parameter conventions.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", + "ast_data": "FunctionDef name:sanitize_spec_name arg:name arguments arg If Return return:yes Assign Call Call Call If Call Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "__from_arrow__", + "source_code": "def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> IntervalArray:\n import pyarrow\n from pandas.core.arrays import IntervalArray\n if isinstance(array, pyarrow.Array):\n chunks = [array]\n else:\n chunks = array.chunks\n results = []\n for arr in chunks:\n if isinstance(arr, pyarrow.ExtensionArray):\n arr = arr.storage\n left = np.asarray(arr.field('left'), dtype=self.subtype)\n right = np.asarray(arr.field('right'), dtype=self.subtype)\n iarr = IntervalArray.from_arrays(left, right, closed=self.closed)\n results.append(iarr)\n if not results:\n return IntervalArray.from_arrays(np.array([], dtype=self.subtype), np.array([], dtype=self.subtype), closed=self.closed)\n return IntervalArray._concat_same_type(results)", + "docstring": "Construct IntervalArray from pyarrow Array/ChunkedArray.", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", + "ast_data": "FunctionDef name:__from_arrow__ arg:self arg:array arguments arg arg If Call Assign Assign Assign For If Call Assign Assign Call Call Assign Call Call Assign Call Call If Return return:yes Call Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "make_list", + "source_code": "@register.filter(is_safe=False)\n@stringfilter\ndef make_list(value):\n return list(value)", + "docstring": "Return the value turned into a list. For an integer, it's a list of digits. For a string, it's a list of characters.", + "type": "function", + "file_path": "django\\django\\template\\defaultfilters.py", + "ast_data": "FunctionDef name:make_list arg:value arguments arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "from_matrix", + "source_code": "@classmethod\ndef from_matrix(cls, matrix: Tensor) -> So2:\n check_so2_matrix_shape(matrix)\n check_so2_matrix(matrix)\n z = complex(matrix[..., 0, 0], matrix[..., 1, 0])\n return cls(z)", + "docstring": "Create So2 from a rotation matrix. Args: matrix: the rotation matrix to convert of shape :math:. 
Example: >>> m = torch.eye(2) >>> s = So2.from_matrix(m) >>> s.z Parameter containing: tensor(1.+0.j, requires_grad=True)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py", + "ast_data": "FunctionDef name:from_matrix arg:cls arg:matrix arguments arg arg Call Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_compute_depth", + "source_code": "def _compute_depth(tree, node):\n\n def compute_depth_(current_node, current_depth, children_left, children_right, depths):\n depths += [current_depth]\n left = children_left[current_node]\n right = children_right[current_node]\n if left != -1 and right != -1:\n compute_depth_(left, current_depth + 1, children_left, children_right, depths)\n compute_depth_(right, current_depth + 1, children_left, children_right, depths)\n depths = []\n compute_depth_(node, 1, tree.children_left, tree.children_right, depths)\n return max(depths)", + "docstring": "Returns the depth of the subtree rooted in node.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\tree\\_export.py", + "ast_data": "FunctionDef name:_compute_depth arg:tree arg:node arguments arg arg FunctionDef name:compute_depth_ arg:current_node arg:current_depth arg:children_left arg:children_right arg:depths arguments arg arg arg arg arg Assign Assign If BoolOp Compare Compare Call Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "close_rings", + "source_code": "def close_rings(self):\n capi.geom_close_rings(self.ptr)", + "docstring": "If there are any rings within this geometry that have not been closed, this routine will do so by adding the starting point at the end.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:close_rings arg:self arguments arg Call" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, layout, inputs, constant_args=()) -> None:\n self.has_bias = len(inputs) == 8\n self.idx_for_inplace_sum = 6\n super().__init__(layout, inputs, constant_args, None, op_overload=torch.ops.onednn.qconv2d_pointwise.binary, cpp_kernel_name='aoti_torch_cpu__qconv2d_pointwise_binary_tensor')", + "docstring": "Needs input/weight/output qparams if bias is not None - inputs = [x, x_scale, x_zp, w, w_scale, w_zp, accum, b] - const_args = [stride, padding, dilation, groups, o_scale, o_zp, output_dtype, accum_scale, accum_zp, binary_attr, aplha, unary_attr, unary_scalars, unary_algorithm] else - inputs = [x, x_scale, x_zp, w, w_scale, w_zp, accum] - const_args [b, stride, padding, dilation, groups, o_scale, o_zp, output_dtype, accum_scale, accum_zp, binary_attr, aplha, unary_attr, unary_scalars, unary_algorithm]", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\mkldnn_ir.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:layout arg:inputs arg:constant_args arguments arg arg arg arg Assign Compare Call Assign Call Call" + }, + { + "library": "scipy", + "name": "from_diagonal", + "source_code": "@staticmethod\ndef from_diagonal(diagonal):\n return CovViaDiagonal(diagonal)", + "docstring": "Return a representation of a covariance matrix from its diagonal. Parameters ---------- diagonal : array_like The diagonal elements of a diagonal matrix. Notes ----- Let the diagonal elements of a diagonal covariance matrix :math: be stored in the vector :math:. 
When all elements of :math: are strictly positive, whitening of a data point :math: is performed by computing :math:, where the inverse square root can be taken element-wise. :math: is calculated as :math:, where the :math: operation is performed element-wise. This class supports singular covariance matrices. When computing `ddCovarianceCovariance` object against a reference implementations. >>> res = cov.whiten(x) >>> ref = np.diag(d**-0.5) @ x >>> np.allclose(res, ref) True >>> res = cov.log_pdet >>> ref = np.linalg.slogdet(A)[-1] >>> np.allclose(res, ref) True", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_covariance.py", + "ast_data": "FunctionDef name:from_diagonal arg:diagonal arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "should_save_summary", + "source_code": "@property\ndef should_save_summary(self):\n raise NotImplementedError('must be implemented in descendants')", + "docstring": "Whether saving summaries is needed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:should_save_summary arg:self arguments arg Raise Call" + }, + { + "library": "pytorch", + "name": "prepare_local_plan", + "source_code": "@abc.abstractmethod\ndef prepare_local_plan(self, plan: SavePlan) -> SavePlan:\n pass", + "docstring": "Perform storage-specific local planning. While this method can produce a completely different plan, the recommended way is to store storage specific data in SavePlan::storage_data. Args: plan (SavePlan): The local plan from the `` after storage local planning", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py", + "ast_data": "FunctionDef name:prepare_local_plan arg:self arg:plan arguments arg arg" + }, + { + "library": "pytorch", + "name": "scale_weight_node", + "source_code": "def scale_weight_node(node: Node, modules: dict[str, nn.Module], equalization_scale: torch.Tensor, next_equalization_scale: Optional[torch.Tensor]) -> None:\n if equalization_scale is None:\n return\n if fused_module_supports_equalization(modules[str(node.target)]):\n op_module = modules[str(node.target)][0]\n else:\n op_module = modules[str(node.target)]\n assert nn_module_supports_equalization(op_module) or custom_module_supports_equalization(op_module)\n weight = op_module.weight\n assert isinstance(weight, torch.Tensor)\n equalization_scale_reshaped = reshape_scale(equalization_scale, 1, weight)\n scaled_weight = torch.mul(weight, torch.reciprocal(equalization_scale_reshaped))\n if next_equalization_scale is None:\n op_module.weight = nn.Parameter(scaled_weight)\n return\n next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, weight)\n scaled_weight = torch.mul(scaled_weight, next_equalization_scale_reshaped)\n op_module.weight = nn.Parameter(scaled_weight)\n bias = op_module.bias\n if bias is None:\n return\n assert isinstance(bias, torch.Tensor)\n next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, bias)\n scaled_bias = torch.mul(bias, next_equalization_scale_reshaped)\n op_module.bias = nn.Parameter(scaled_bias)", + "docstring": "Scale the weights for input-weight equalization by multiplying the weight by 1/equalization_scale and next_equalization_scale Args: node: Current node whose weights we want to scale equalization_scale: Current node's calculated equalization scale next_equalization_scale: Next node's calculated equalization scale if the following node needs to be equalized, 
1 otherwise", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", + "ast_data": "FunctionDef name:scale_weight_node arg:node arg:modules arg:equalization_scale arg:next_equalization_scale arguments arg arg arg arg If Compare Return return:no If Call Call Assign Call Assign Call BoolOp Call Call Assign Call Assign Call Assign Call Call If Compare Assign Call Return return:no Assign Call Assign Call Assign Call Assign If Compare Return return:no Call Assign Call Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "ctc_state_log_probs", + "source_code": "def ctc_state_log_probs(seq_lengths, max_seq_length):\n batch_size = _get_dim(seq_lengths, 0)\n num_label_states = max_seq_length + 1\n num_duration_states = 2\n num_states = num_duration_states * num_label_states\n log_0 = math_ops.cast(math_ops.log(math_ops.cast(0, dtypes.float64) + 1e-307), dtypes.float32)\n initial_state_log_probs = array_ops.one_hot(indices=array_ops.zeros([batch_size], dtype=dtypes.int32), depth=num_states, on_value=0.0, off_value=log_0, axis=1)\n label_final_state_mask = array_ops.one_hot(seq_lengths, depth=num_label_states, axis=0)\n duration_final_state_mask = array_ops.ones([num_duration_states, 1, batch_size])\n final_state_mask = duration_final_state_mask * label_final_state_mask\n final_state_log_probs = (1.0 - final_state_mask) * log_0\n final_state_log_probs = array_ops.reshape(final_state_log_probs, [num_states, batch_size])\n return (initial_state_log_probs, array_ops.transpose(final_state_log_probs))", + "docstring": "Computes CTC alignment initial and final state log probabilities. Create the initial/final state values directly as log values to avoid having to take a float64 log on tpu (which does not exist). Args: seq_lengths: int tensor of shape [batch_size], seq lengths in the batch. max_seq_length: int, max sequence length possible. Returns: initial_state_log_probs, final_state_log_probs", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ctc_ops.py", + "ast_data": "FunctionDef name:ctc_state_log_probs arg:seq_lengths arg:max_seq_length arguments arg arg Assign Call Assign Assign Assign Assign Call Call Call Assign Call Call Assign Call Assign Call Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "merge", + "source_code": "def merge(base, other):\n _if_filename_register_autoreload(other)\n for section, value_map in reprconf.Parser.load(other).items():\n if not isinstance(value_map, dict):\n raise ValueError(\"Application config must include section headers, but the config you tried to merge doesn't have any sections. Wrap your config in another dict with paths as section headers, for example: {'/': config}.\")\n base.setdefault(section, {}).update(value_map)", + "docstring": "Merge one app config (from a dict, file, or filename) into another. 
If the given config is a filename, it will be appended to the list of files to monitor for \"autoreload\" changes.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\_cpconfig.py", + "ast_data": "FunctionDef name:merge arg:base arg:other arguments arg arg Call For Call Call If Call Raise Call Call Call" + }, + { + "library": "tensorflow", + "name": "_extract_type_spec_recursively", + "source_code": "def _extract_type_spec_recursively(value):\n if isinstance(value, composite_tensor.CompositeTensor):\n return value._type_spec\n if isinstance(value, variables.Variable):\n return resource_variable_ops.VariableSpec(value.shape, dtype=value.dtype, trainable=value.trainable)\n if tensor_util.is_tensor(value):\n return tensor_spec.TensorSpec(value.shape, value.dtype)\n if isinstance(value, list):\n return list((_extract_type_spec_recursively(v) for v in value))\n if isinstance(value, data_structures.TrackableDataStructure):\n return _extract_type_spec_recursively(value.__wrapped__)\n if isinstance(value, tuple):\n return type(value)((_extract_type_spec_recursively(x) for x in value))\n if isinstance(value, dict):\n return type(value)(((k, _extract_type_spec_recursively(v)) for k, v in value.items()))\n return value", + "docstring": "Return (collection of) (s) for if it includes s. If is a or , return its . If is a collection containing values, recursively supplant them with their respective s in a collection of parallel stucture. If is none of the above, return it unchanged. Args: value: a Python to (possibly) turn into a (collection of) (s). Returns: spec: the or collection of s corresponding to or , if no s are found.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:_extract_type_spec_recursively arg:value arguments arg If Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes Call If Call Return return:yes Call Call Call If Call Return return:yes Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_lr", + "source_code": "@override\ndef get_lr(self) -> list[float]:\n _warn_get_lr_called_within_step(self)\n if self._is_initial:\n return [group['lr'] for group in self.optimizer.param_groups]\n elif self._step_count == 1 and self.last_epoch > 0:\n return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(self.last_epoch * math.pi / self.T_max)) / 2 for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)]\n elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0:\n return [group['lr'] + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2 for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)]\n return [(1 + math.cos(math.pi * self.last_epoch / self.T_max)) / (1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max)) * (group['lr'] - self.eta_min) + self.eta_min for group in self.optimizer.param_groups]", + "docstring": "Retrieve the learning rate of each parameter group.", + "type": "method", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "FunctionDef name:get_lr arg:self arguments arg Call If Return return:yes If BoolOp Compare Compare Return return:yes Call Call If Compare Return return:yes Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "device", + "source_code": "def device(self, name):\n if isinstance(name, LogicalDevice):\n name = name.name\n elif 
pydev.is_device_spec(name):\n name = name.to_string()\n return _EagerDeviceContext(self, name)", + "docstring": "Context-manager to force placement of operations and Tensors on a device. Args: name: Name of the device or None to get default placement. Returns: Context manager that forces device placement. Raises: ValueError: If name is not a string or is an invalid device name. RuntimeError: If device scopes are not properly nested.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:device arg:self arg:name arguments arg arg If Call Assign If Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "as_string", + "source_code": "def as_string(self):\n if not self.is_set:\n return None\n string = capi.get_field_as_string(self._feat.ptr, self._index)\n return force_str(string, encoding=self._feat.encoding, strings_only=True)", + "docstring": "Retrieve the Field's value as a string.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\field.py", + "ast_data": "FunctionDef name:as_string arg:self arguments arg If Return return:no Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_resolve_group_name", + "source_code": "def _resolve_group_name(group: RANK_TYPES, tag: str='') -> str:\n if isinstance(group, dist.ProcessGroup):\n return group.group_name\n elif isinstance(group, str):\n return group\n elif isinstance(group, DeviceMesh):\n assert group.ndim == 1, 'Only 1D mesh is supported, pass in (DeviceMesh, int) together if mesh > 1D'\n return group._dim_group_names[0]\n elif isinstance(group, tuple):\n if len(group) == 2 and isinstance(group[0], DeviceMesh) and isinstance(group[1], int):\n dmesh = group[0]\n dim = group[1]\n return dmesh._dim_group_names[dim]\n else:\n raise ValueError('Invalid tuple for group must be (DeviceMesh, int)')\n elif isinstance(group, list):\n if not is_torchdynamo_compiling():\n warnings.warn('The combination of ranks + tag as process group identifier has been deprecated. Please switch to using ProcessGroup, DeviceMesh, or group name instead.', FutureWarning, stacklevel=3)\n return c10d._resolve_group_name_by_ranks_and_tag(cast(list[int], group), tag)\n else:\n raise ValueError(f'Unsupported group type: {type(group)}, {group}')", + "docstring": "Given group in RANK_TYPES, return the group name.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py", + "ast_data": "FunctionDef name:_resolve_group_name arg:group arg:tag arguments arg arg If Call Return return:yes If Call Return return:yes If Call Compare Return return:yes If Call If BoolOp Compare Call Call Call Assign Assign Return return:yes Raise Call If Call If Call Call Return return:yes Call Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "ignore_errors", + "source_code": "def ignore_errors(self, log_warning=False, name=None) -> 'DatasetV2':\n from tensorflow.python.data.ops import ignore_errors_op\n return ignore_errors_op._ignore_errors(self, log_warning, name)", + "docstring": "Drops elements that cause errors. >>> dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.]) >>> dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, \"\")) >>> list(dataset.as_numpy_iterator()) Traceback (most recent call last): ... InvalidArgumentError: ... Tensor had Inf values >>> dataset = dataset.ignore_errors() >>> list(dataset.as_numpy_iterator()) [1.0, 0.5, 0.25] Args: log_warning: (Optional.) 
A bool indicating whether or not ignored errors should be logged to stderr. Defaults to . name: (Optional.) A string indicating a name for the operation. Returns: A new with the transformation applied as described above.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:ignore_errors arg:self arg:log_warning arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "get_group_index_sorter", + "source_code": "def get_group_index_sorter(group_index: npt.NDArray[np.intp], ngroups: int | None=None) -> npt.NDArray[np.intp]:\n if ngroups is None:\n ngroups = 1 + group_index.max()\n count = len(group_index)\n alpha = 0.0\n beta = 1.0\n do_groupsort = count > 0 and alpha + beta * ngroups < count * np.log(count)\n if do_groupsort:\n sorter, _ = algos.groupsort_indexer(ensure_platform_int(group_index), ngroups)\n else:\n sorter = group_index.argsort(kind='mergesort')\n return ensure_platform_int(sorter)", + "docstring": "algos.groupsort_indexer implements and it is at least O(ngroups), where ngroups = prod(shape) shape = map(len, keys) that is, linear in the number of combinations (cartesian product) of unique values of groupby keys. This can be huge when doing multi-key groupby. np.argsort(kind='mergesort') is O(count x log(count)) where count is the length of the data-frame; Both algorithms are sort and that is necessary for correctness of groupby operations. e.g. consider: df.groupby(key)[col].transform('first') Parameters ---------- group_index : np.ndarray[np.intp] signed integer dtype ngroups : int or None, default None Returns ------- np.ndarray[np.intp]", + "type": "function", + "file_path": "pandas\\pandas\\core\\sorting.py", + "ast_data": "FunctionDef name:get_group_index_sorter arg:group_index arg:ngroups arguments arg arg If Compare Assign Call Assign Call Assign Assign Assign BoolOp Compare Compare Call If Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "bind_addr", + "source_code": "@property\ndef bind_addr(self):\n if self.socket_file:\n return self.socket_file\n if self.socket_host is None and self.socket_port is None:\n return None\n return (self.socket_host, self.socket_port)", + "docstring": "Return bind address. 
A (host, port) tuple for TCP sockets or a str for Unix domain sockets.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpserver.py", + "ast_data": "FunctionDef name:bind_addr arg:self arguments arg If Return return:yes If BoolOp Compare Compare Return return:no Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, points, **kwargs):\n super().__init__(**kwargs)\n points = np.asarray(points, float)\n if points.shape != (2, 2):\n raise ValueError('Bbox points must be of the form \"[[x0, y0], [x1, y1]]\".')\n self._points = points\n self._minpos = _default_minpos.copy()\n self._ignore = True\n self._points_orig = self._points.copy()", + "docstring": "Parameters ---------- points : A (2, 2) array of the form ``.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:points arguments arg arg arg Call Call Assign Call If Compare Raise Call Assign Assign Call Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "DataSizeMapper", + "source_code": "class DataSizeMapper:\n\n def __call__(self, x):\n if x is not None:\n return '%d bytes' % len(x)\n else:\n return '--'", + "docstring": "For buffers, report the number of bytes.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\visualize.py", + "ast_data": "ClassDef name:DataSizeMapper FunctionDef name:__call__ arg:self arg:x arguments arg arg If Compare Return return:yes Call Return return:yes" + }, + { + "library": "kornia", + "name": "_modified_bessel_i", + "source_code": "def _modified_bessel_i(n: int, x: Tensor) -> Tensor:\n KORNIA_CHECK(n >= 2, 'n must be greater than 1.99')\n if (x == 0.0).all():\n return x\n batch_size = x.shape[0]\n tox = 2.0 / x.abs()\n ans = zeros(batch_size, 1, device=x.device, dtype=x.dtype)\n bip = zeros(batch_size, 1, device=x.device, dtype=x.dtype)\n bi = torch.ones(batch_size, 1, device=x.device, dtype=x.dtype)\n m = int(2 * (n + int(sqrt(40.0 * n))))\n for j in range(m, 0, -1):\n bim = bip + float(j) * tox * bi\n bip = bi\n bi = bim\n idx = bi.abs() > 10000000000.0\n if idx.any():\n ans[idx] = ans[idx] * 1e-10\n bi[idx] = bi[idx] * 1e-10\n bip[idx] = bip[idx] * 1e-10\n if j == n:\n ans = bip\n out = ans * _modified_bessel_0(x) / bi\n if n % 2 == 1:\n out = where(x < 0.0, -out, out)\n out = where(x == 0.0, x, out)\n return out", + "docstring": "Adapted from:", + "type": "function", + "file_path": "kornia\\kornia\\filters\\kernels.py", + "ast_data": "FunctionDef name:_modified_bessel_i arg:n arg:x arguments arg arg Call Compare If Call Compare Return return:yes Assign Assign Call Assign Call Assign Call Assign Call Assign Call Call Call For Call Assign Call Assign Assign Assign Compare Call If Call Assign Assign Assign If Compare Assign Assign Call If Compare Assign Call Compare Assign Call Compare Return return:yes" + }, + { + "library": "django", + "name": "set_installed_apps", + "source_code": "def set_installed_apps(self, installed):\n if not self.ready:\n raise AppRegistryNotReady(\"App registry isn't ready yet.\")\n self.stored_app_configs.append(self.app_configs)\n self.app_configs = {}\n self.apps_ready = self.models_ready = self.loading = self.ready = False\n self.clear_cache()\n self.populate(installed)", + "docstring": "Enable a different set of installed apps for get_app_config[s]. installed must be an iterable in the same format as INSTALLED_APPS. 
set_installed_apps() must be balanced with unset_installed_apps(), even if it exits with an exception. Primarily used as a receiver of the setting_changed signal in tests. This method may trigger new imports, which may add new models to the registry of all imported models. They will stay in the registry even after unset_installed_apps(). Since it isn't possible to replay imports safely (e.g. that could lead to registering listeners twice), models are registered when they're imported and never removed.", + "type": "method", + "file_path": "django\\django\\apps\\registry.py", + "ast_data": "FunctionDef name:set_installed_apps arg:self arg:installed arguments arg arg If Raise Call Call Assign Assign Call Call" + }, + { + "library": "scikit-learn", + "name": "_ovr_decision_function", + "source_code": "def _ovr_decision_function(predictions, confidences, n_classes):\n n_samples = predictions.shape[0]\n votes = np.zeros((n_samples, n_classes))\n sum_of_confidences = np.zeros((n_samples, n_classes))\n k = 0\n for i in range(n_classes):\n for j in range(i + 1, n_classes):\n sum_of_confidences[:, i] -= confidences[:, k]\n sum_of_confidences[:, j] += confidences[:, k]\n votes[predictions[:, k] == 0, i] += 1\n votes[predictions[:, k] == 1, j] += 1\n k += 1\n transformed_confidences = sum_of_confidences / (3 * (np.abs(sum_of_confidences) + 1))\n return votes + transformed_confidences", + "docstring": "Compute a continuous, tie-breaking OvR decision function from OvO. It is important to include a continuous value, not only votes, to make computing AUC or calibration meaningful. Parameters ---------- predictions : array-like of shape (n_samples, n_classifiers) Predicted classes for each binary classifier. confidences : array-like of shape (n_samples, n_classifiers) Decision functions or predicted probabilities for positive class for each binary classifier. n_classes : int Number of classes. n_classifiers must be ``.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\multiclass.py", + "ast_data": "FunctionDef name:_ovr_decision_function arg:predictions arg:confidences arg:n_classes arguments arg arg arg Assign Assign Call Assign Call Assign For Call For Call Compare Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, *args, **kwargs):\n super().__init__()\n self._storage = self._make_storage(*args, **kwargs)\n self._storage.update({key: self._track_value(value, name=self._name_element(key)) for key, value in self._storage.items()})", + "docstring": "Construct a new sequence. 
Arguments are passed to .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "initialize_variables", + "source_code": "@tf_export(v1=['initialize_variables'])\n@tf_should_use.should_use_result\n@deprecated('2017-03-02', 'Use `tf.variables_initializer` instead.')\ndef initialize_variables(var_list, name='init'):\n return variables_initializer(var_list, name=name)", + "docstring": "See .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:initialize_variables arg:var_list arg:name arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "authlib", + "name": "create_query_token_func", + "source_code": "def create_query_token_func(session, token_model):\n\n def query_token(token, token_type_hint):\n q = session.query(token_model)\n if token_type_hint == 'access_token':\n return q.filter_by(access_token=token).first()\n elif token_type_hint == 'refresh_token':\n return q.filter_by(refresh_token=token).first()\n item = q.filter_by(access_token=token).first()\n if item:\n return item\n return q.filter_by(refresh_token=token).first()\n return query_token", + "docstring": "Create an `` function for revocation, introspection token endpoints. :param session: SQLAlchemy session :param token_model: Token model class", + "type": "function", + "file_path": "authlib\\authlib\\integrations\\sqla_oauth2\\functions.py", + "ast_data": "FunctionDef name:create_query_token_func arg:session arg:token_model arguments arg arg FunctionDef name:query_token arg:token arg:token_type_hint arguments arg arg Assign Call If Compare Return return:yes Call Call If Compare Return return:yes Call Call Assign Call Call If Return return:yes Return return:yes Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "convert_to_list_like", + "source_code": "def convert_to_list_like(values: Hashable | Iterable | AnyArrayLike) -> list | AnyArrayLike:\n if isinstance(values, (list, np.ndarray, ABCIndex, ABCSeries, ABCExtensionArray)):\n return values\n elif isinstance(values, abc.Iterable) and (not isinstance(values, str)):\n return list(values)\n return [values]", + "docstring": "Convert list-like or scalar input to list-like. List, numpy and pandas array-like inputs are returned unmodified whereas others are converted to list.", + "type": "function", + "file_path": "pandas\\pandas\\core\\common.py", + "ast_data": "FunctionDef name:convert_to_list_like arg:values arguments arg If Call Return return:yes If BoolOp Call Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "max_pool3d", + "source_code": "@tf_export('nn.max_pool3d')\n@dispatch.add_dispatch_support\ndef max_pool3d(input, ksize, strides, padding, data_format='NDHWC', name=None):\n with ops.name_scope(name, 'MaxPool3D', [input]) as name:\n if data_format is None:\n data_format = 'NDHWC'\n channel_index = 1 if data_format.startswith('NC') else 4\n ksize = _get_sequence(ksize, 3, channel_index, 'ksize')\n strides = _get_sequence(strides, 3, channel_index, 'strides')\n return gen_nn_ops.max_pool3d(input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)", + "docstring": "Performs the max pooling on the input. Args: input: A 5-D of the format specified by . 
ksize: An int or list of that has length , or . The size of the window for each dimension of the input tensor. strides: An int or list of that has length , or . The stride of the sliding window for each dimension of the input tensor. padding: A string, either or . The padding algorithm. See [here]( for more information. data_format: An optional string from: \"NDHWC\", \"NCDHW\". Defaults to \"NDHWC\". The data format of the input and output data. With the default format \"NDHWC\", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels]. Alternatively, the format could be \"NCDHW\", the data storage order is: [batch, in_channels, in_depth, in_height, in_width]. name: A name for the operation (optional). Returns: A of format specified by . The max pooled output tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:max_pool3d arg:input arg:ksize arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg With Call If Compare Assign Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "DegenerateDataWarning", + "source_code": "class DegenerateDataWarning(RuntimeWarning):\n\n def __init__(self, msg=None):\n if msg is None:\n msg = 'Degenerate data encountered; results may not be reliable.'\n self.args = (msg,)", + "docstring": "Warns when data is degenerate and results may not be reliable.", + "type": "class", + "file_path": "scipy\\scipy\\stats\\_warnings_errors.py", + "ast_data": "ClassDef name:DegenerateDataWarning FunctionDef name:__init__ arg:self arg:msg arguments arg arg If Compare Assign Assign" + }, + { + "library": "pandas", + "name": "_get_atom", + "source_code": "@classmethod\ndef _get_atom(cls, values: ArrayLike) -> Col:\n dtype = values.dtype\n itemsize = dtype.itemsize\n shape = values.shape\n if values.ndim == 1:\n shape = (1, values.size)\n if isinstance(values, Categorical):\n codes = values.codes\n atom = cls.get_atom_data(shape, kind=codes.dtype.name)\n elif lib.is_np_dtype(dtype, 'M') or isinstance(dtype, DatetimeTZDtype):\n atom = cls.get_atom_datetime64(shape)\n elif lib.is_np_dtype(dtype, 'm'):\n atom = cls.get_atom_timedelta64(shape)\n elif is_complex_dtype(dtype):\n atom = _tables().ComplexCol(itemsize=itemsize, shape=shape[0])\n elif is_string_dtype(dtype):\n atom = cls.get_atom_string(shape, itemsize)\n else:\n atom = cls.get_atom_data(shape, kind=dtype.name)\n return atom", + "docstring": "Get an appropriately typed and shaped pytables.Col object for values.", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:_get_atom arg:cls arg:values arguments arg arg Assign Assign Assign If Compare Assign If Call Assign Assign Call If BoolOp Call Call Assign Call If Call Assign Call If Call Assign Call Call If Call Assign Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "get_laplacian_kernel1d", + "source_code": "def get_laplacian_kernel1d(kernel_size: int, *, device: Optional[Device]=None, dtype: Dtype=torch.float32) -> Tensor:\n _check_kernel_size(kernel_size)\n return laplacian_1d(kernel_size, device=device, dtype=dtype)", + "docstring": "Return the coefficients of a 1D Laplacian filter. Args: kernel_size: filter size. It should be odd and positive. device: tensor device desired to create the kernel dtype: tensor dtype desired to create the kernel Returns: 1D tensor with laplacian filter coefficients. 
Shape: - Output: math: Examples: >>> get_laplacian_kernel1d(3) tensor([ 1., -2., 1.]) >>> get_laplacian_kernel1d(5) tensor([ 1., 1., -4., 1., 1.])", + "type": "function", + "file_path": "kornia\\kornia\\filters\\kernels.py", + "ast_data": "FunctionDef name:get_laplacian_kernel1d arg:kernel_size arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_handle_per_output_metrics", + "source_code": "def _handle_per_output_metrics(self, metrics_dict, y_true, y_pred, mask, weights=None):\n metric_results = []\n for metric_name, metric_fn in metrics_dict.items():\n with backend.name_scope(metric_name):\n metric_result = training_utils_v1.call_metric_function(metric_fn, y_true, y_pred, weights=weights, mask=mask)\n metric_results.append(metric_result)\n return metric_results", + "docstring": "Calls metric functions for a single output. Args: metrics_dict: A dict with metric names as keys and metric fns as values. y_true: Target output. y_pred: Predicted output. mask: Computed mask value for the current output. weights: Weights to be applied on the current output. Returns: A list of metric result tensors.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py", + "ast_data": "FunctionDef name:_handle_per_output_metrics arg:self arg:metrics_dict arg:y_true arg:y_pred arg:mask arg:weights arguments arg arg arg arg arg arg Assign For Call With Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "shutdown", + "source_code": "def shutdown(self) -> None:\n self.executor.shutdown()\n for p in self.processes:\n p.shutdown(wait=False)\n for p in self.processes:\n p.wait()", + "docstring": "Signal all child processes to exit.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\autotune_process.py", + "ast_data": "FunctionDef name:shutdown arg:self arguments arg Call For Call For Call" + }, + { + "library": "seaborn", + "name": "__init__", + "source_code": "def __init__(self, order: list[str] | dict[str, list | None]):\n if not order:\n raise ValueError('GroupBy requires at least one grouping variable')\n if isinstance(order, list):\n order = {k: None for k in order}\n self.order = order", + "docstring": "Initialize the GroupBy from grouping variables and optional level orders. Parameters ---------- order List of variable names or dict mapping names to desired level orders. Level order values can be None to use default ordering rules. The variables can include names that are not expected to appear in the data; these will be dropped before the groups are defined.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\groupby.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:order arguments arg arg If Raise Call If Call Assign Assign" + }, + { + "library": "cherrypy", + "name": "__repr__", + "source_code": "def __repr__(self):\n return '%s.%s(%r, %r)' % (self.__module__, self.__class__.__name__, self.root, self.script_name)", + "docstring": "Generate a representation of the Application instance.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cptree.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "cryptography", + "name": "SignatureAlgorithm", + "source_code": "class SignatureAlgorithm(utils.Enum):\n ANONYMOUS = 0\n RSA = 1\n DSA = 2\n ECDSA = 3", + "docstring": "Signature algorithms that are valid for SCTs. These are exactly the same as SignatureAlgorithm in RFC 5246 (TLS 1.2). 
See:", + "type": "class", + "file_path": "cryptography\\src\\cryptography\\x509\\certificate_transparency.py", + "ast_data": "ClassDef name:SignatureAlgorithm Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "_backward_function_wrapper", + "source_code": "def _backward_function_wrapper(*args):\n if not backward.outputs:\n return backward.structured_outputs\n processed_args = []\n input_index = 0\n for output_index, arg in enumerate(args):\n if isinstance(arg, indexed_slices.IndexedSlices):\n arg = ops.convert_to_tensor(arg)\n if output_index in skip_positions:\n continue\n if arg is None:\n input_placeholder = backward.inputs[input_index]\n if input_placeholder.dtype == dtypes.variant:\n arg = variant_zeros_like[output_index]\n else:\n arg = array_ops.zeros(*default_gradient.shape_and_dtype(input_placeholder))\n processed_args.append(arg)\n input_index += 1\n if input_index >= backward_function_inputs:\n break\n return backward._call_flat(processed_args, remapped_captures)", + "docstring": "Process output gradients and call the backward function.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "FunctionDef name:_backward_function_wrapper arguments arg If Return return:yes Assign Assign For Call If Call Assign Call If Compare If Compare Assign If Compare Assign Assign Call Call Call If Compare Return return:yes Call" + }, + { + "library": "pytorch", + "name": "smooth_l1_loss", + "source_code": "def smooth_l1_loss(input: Tensor, target: Tensor, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean', beta: float=1.0) -> Tensor:\n if has_torch_function_variadic(input, target):\n return handle_torch_function(smooth_l1_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction, beta=beta)\n if not target.size() == input.size():\n warnings.warn(f'Using a target size ({target.size()}) that is different to the input size ({input.size()}). This will likely lead to incorrect results due to broadcasting. Please ensure they have the same size.', stacklevel=2)\n if size_average is not None or reduce is not None:\n reduction = _Reduction.legacy_get_string(size_average, reduce)\n expanded_input, expanded_target = torch.broadcast_tensors(input, target)\n if beta == 0.0:\n return torch._C._nn.l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))\n else:\n return torch._C._nn.smooth_l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction), beta)", + "docstring": "Compute the Smooth L1 loss. Function uses a squared term if the absolute element-wise error falls below beta and an L1 term otherwise. See :class: for details. Args: input (Tensor): Predicted values. target (Tensor): Ground truth values. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. beta (float, optional): Specifies the threshold at which to change from the squared term to the L1 term in the loss calculation. This value must be positive. Default: 1.0. 
Returns: Tensor: L1 loss (optionally weighted).", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:smooth_l1_loss arg:input arg:target arg:size_average arg:reduce arg:reduction arg:beta arguments arg arg arg arg arg arg If Call Return return:yes Call If Compare Call Call Call Call Call If BoolOp Compare Compare Assign Call Assign Call If Compare Return return:yes Call Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "_parse_char_metrics", + "source_code": "def _parse_char_metrics(fh):\n required_keys = {'C', 'WX', 'N', 'B'}\n ascii_d = {}\n name_d = {}\n for line in fh:\n line = _to_str(line.rstrip())\n if line.startswith('EndCharMetrics'):\n return (ascii_d, name_d)\n vals = dict((s.strip().split(' ', 1) for s in line.split(';') if s))\n if not required_keys.issubset(vals):\n raise RuntimeError('Bad char metrics line: %s' % line)\n num = _to_int(vals['C'])\n wx = _to_float(vals['WX'])\n name = vals['N']\n bbox = _to_list_of_floats(vals['B'])\n bbox = list(map(int, bbox))\n metrics = CharMetrics(wx, name, bbox)\n if name == 'Euro':\n num = 128\n elif name == 'minus':\n num = ord('−')\n if num != -1:\n ascii_d[num] = metrics\n name_d[name] = metrics\n raise RuntimeError('Bad parse')", + "docstring": "Parse the given filehandle for character metrics information and return the information as dicts. It is assumed that the file cursor is on the line behind 'StartCharMetrics'. Returns ------- ascii_d : dict A mapping \"ASCII num of the character\" to . name_d : dict A mapping \"character name\" to . Notes ----- This function is incomplete per the standard, but thus far parses all the sample afm files tried.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\_afm.py", + "ast_data": "FunctionDef name:_parse_char_metrics arg:fh arguments arg Assign Assign Assign For Assign Call Call If Call Return return:yes Assign Call Call Call Call If Call Raise Call Assign Call Assign Call Assign Assign Call Assign Call Call Assign Call If Compare Assign If Compare Assign Call If Compare Assign Assign Raise Call" + }, + { + "library": "authlib", + "name": "get_jwt_config", + "source_code": "def get_jwt_config(self):\n raise NotImplementedError()", + "docstring": "Get the JWT configuration for OpenIDImplicitGrant. The JWT configuration will be used to generate ``. Developers MUST implement this method in subclass, e.g.:: def get_jwt_config(self): return { \"key\": read_private_key_file(key_path), \"alg\": \"RS256\", \"iss\": \"issuer-identity\", \"exp\": 3600, } :return: dict", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\core\\grants\\implicit.py", + "ast_data": "FunctionDef name:get_jwt_config arg:self arguments arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "trace_dot", + "source_code": "def trace_dot(X, Y):\n return np.dot(X.ravel(), Y.ravel())", + "docstring": "Trace of np.dot(X, Y.T). Parameters ---------- X : array-like First matrix. 
Y : array-like Second matrix.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py", + "ast_data": "FunctionDef name:trace_dot arg:X arg:Y arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "scrapy", + "name": "update_classpath", + "source_code": "def update_classpath(path: Any) -> Any:\n for prefix, replacement in DEPRECATION_RULES:\n if isinstance(path, str) and path.startswith(prefix):\n new_path = path.replace(prefix, replacement, 1)\n warnings.warn(f'`{path}` class is deprecated, use `{new_path}` instead', ScrapyDeprecationWarning)\n return new_path\n return path", + "docstring": "Update a deprecated path from an object with its new location", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\deprecate.py", + "ast_data": "FunctionDef name:update_classpath arg:path arguments arg For If BoolOp Call Call Assign Call Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "maybe_merge_call", + "source_code": "@tf_export('__internal__.distribute.interim.maybe_merge_call', v1=[])\ndef maybe_merge_call(fn, strategy, *args, **kwargs):\n if strategy_supports_no_merge_call():\n return fn(strategy, *args, **kwargs)\n else:\n return distribute_lib.get_replica_context().merge_call(fn, args=args, kwargs=kwargs)", + "docstring": "Maybe invoke via which may or may not be fulfilled. The caller of this utility function requests to invoke via at 's best efforts. It is 's internal whether the request is honored, depending on the . See for more information. This is an interim API which is subject to removal and does not guarantee backward-compatibility. Args: fn: the function to be invoked. strategy: the to call with. *args: the positional arguments to be passed in to . **kwargs: the keyword arguments to be passed in to . Returns: The return value of the call.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\merge_call_interim.py", + "ast_data": "FunctionDef name:maybe_merge_call arg:fn arg:strategy arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "is_immutable", + "source_code": "def is_immutable(self):\n return self.mutation_type is None", + "docstring": "Whether Dynamo bans mutation on this variable.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py", + "ast_data": "FunctionDef name:is_immutable arg:self arguments arg Return return:yes Compare" + }, + { + "library": "kornia", + "name": "_transform_input", + "source_code": "def _transform_input(input: Tensor) -> Tensor:\n if not torch.is_tensor(input):\n raise TypeError(f'Input type is not a Tensor. Got {type(input)}')\n if len(input.shape) not in [2, 3, 4]:\n raise ValueError(f'Input size must have a shape of either (H, W), (C, H, W) or (*, C, H, W). Got {input.shape}')\n if len(input.shape) == 2:\n input = input.unsqueeze(0)\n if len(input.shape) == 3:\n input = input.unsqueeze(0)\n return input", + "docstring": "Reshape an input tensor to be (*, C, H, W). Accept either (H, W), (C, H, W) or (*, C, H, W). 
Args: input: Tensor Returns: Tensor", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py", + "ast_data": "FunctionDef name:_transform_input arg:input arguments arg If Call Raise Call Call If Compare Call Raise Call If Compare Call Assign Call If Compare Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "process_request", + "source_code": "def process_request(self, request):\n if request.method not in ('GET', 'HEAD'):\n request._cache_update_cache = False\n return None\n cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache)\n if cache_key is None:\n request._cache_update_cache = True\n return None\n response = self.cache.get(cache_key)\n if response is None and request.method == 'HEAD':\n cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache)\n response = self.cache.get(cache_key)\n if response is None:\n request._cache_update_cache = True\n return None\n if (max_age_seconds := get_max_age(response)) is not None and (expires_timestamp := parse_http_date_safe(response['Expires'])) is not None:\n now_timestamp = int(time.time())\n remaining_seconds = expires_timestamp - now_timestamp\n response['Age'] = max(0, max_age_seconds - remaining_seconds)\n request._cache_update_cache = False\n return response", + "docstring": "Check whether the page is already cached and return the cached version if available.", + "type": "method", + "file_path": "django\\django\\middleware\\cache.py", + "ast_data": "FunctionDef name:process_request arg:self arg:request arguments arg arg If Compare Assign Return return:no Assign Call If Compare Assign Return return:no Assign Call If BoolOp Compare Compare Assign Call Assign Call If Compare Assign Return return:no If BoolOp Compare Call Compare Call Assign Call Call Assign Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "AutoDynamic", + "source_code": "class AutoDynamic(enum.Enum):\n token = 0", + "docstring": "The top element of our (bounded) semilattice, whenever you merge this with any other element you always get it again", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\pgo.py", + "ast_data": "ClassDef name:AutoDynamic Assign" + }, + { + "library": "pytorch", + "name": "delete_user_reference", + "source_code": "def delete_user_reference(node, user):\n assert len(user.kwargs) == 0\n use_idxs = [i for i, arg in enumerate(user.args) if arg == node]\n assert len(use_idxs) == 1\n args_copy = list(user.args)\n args_copy.pop(use_idxs[0])\n user.args = tuple(args_copy)\n logger.debug(f'Deleted {node} from user {user}, arg index = {use_idxs[0]}')", + "docstring": "Delete reference of from 's arg list. Args: - node: a node at root. 
- user: a submodule node that uses .", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py", + "ast_data": "FunctionDef name:delete_user_reference arg:node arg:user arguments arg arg Compare Call Assign Call Compare Compare Call Assign Call Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "serialize", + "source_code": "def serialize(self) -> _WireProtocolPickledOutput:\n from torch.fx._graph_pickler import GraphPickler\n if isinstance(self.graph, CompiledFxGraph):\n self.graph.prepare_for_serialization()\n return _WireProtocolPickledOutput(GraphPickler.dumps(self))", + "docstring": "Turns this object into a _WireProtocolPickledOutput which can be directly transferred across a stream.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py", + "ast_data": "FunctionDef name:serialize arg:self arguments arg If Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "save", + "source_code": "def save(self, state_dict: STATE_DICT_TYPE) -> Metadata:\n return saver.save(state_dict, self.storage_writer, process_group=self.process_group, coordinator_rank=self.coordinator_rank, no_dist=self.no_dist, planner=self.save_planner)", + "docstring": "Calls :py:meth: . Utilizing values passed during initialization.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\_checkpointer.py", + "ast_data": "FunctionDef name:save arg:self arg:state_dict arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "log_addition", + "source_code": "def log_addition(self, request, obj, message):\n from django.contrib.admin.models import ADDITION, LogEntry\n return LogEntry.objects.log_actions(user_id=request.user.pk, queryset=[obj], action_flag=ADDITION, change_message=message, single_object=True)", + "docstring": "Log that an object has been successfully added. 
The default implementation creates an admin LogEntry object.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:log_addition arg:self arg:request arg:obj arg:message arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_last_sl", + "source_code": "def get_last_sl(self):\n return self._last_sl", + "docstring": "Return last computed sparsity level by current scheduler.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\scheduler\\base_scheduler.py", + "ast_data": "FunctionDef name:get_last_sl arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "from_list_qconfig_mapping", + "source_code": "@classmethod\ndef from_list_qconfig_mapping(cls, qconfig_mapping_list: list[QConfigMapping]) -> QConfigMultiMapping:\n new_qconfig_multi_mapping = cls()\n new_qconfig_multi_mapping.qconfig_mappings_list = copy.deepcopy(qconfig_mapping_list)\n for style in _QCONFIG_STYLE_ORDER[1:]:\n qconfig_dict_list: dict[Any, list[QConfigAny]] = {}\n for qconfig_mapping in qconfig_mapping_list:\n qconfig_dict = getattr(qconfig_mapping, style)\n for key, qconfig in qconfig_dict.items():\n if key not in qconfig_dict_list:\n qconfig_dict_list[key] = []\n qconfig_dict_list[key].append(qconfig)\n set_method_name = _QCONFIG_STYLE_TO_METHOD[style]\n set_method = getattr(new_qconfig_multi_mapping, set_method_name)\n for key, qconfig_list in qconfig_dict_list.items():\n if isinstance(key, tuple):\n set_method(*key, qconfig_list)\n else:\n set_method(key, qconfig_list)\n return new_qconfig_multi_mapping", + "docstring": "Creates a QConfigMultiMapping from a list of QConfigMappings", + "type": "method", + "file_path": "pytorch\\torch\\ao\\ns\\fx\\qconfig_multi_mapping.py", + "ast_data": "FunctionDef name:from_list_qconfig_mapping arg:cls arg:qconfig_mapping_list arguments arg arg Assign Call Assign Call For For Assign Call For Call If Compare Assign Call Assign Assign Call For Call If Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_p_assert", + "source_code": "def _p_assert(cond: Any, s: str, raise_assertion_error: bool=True) -> None:\n if not cond:\n print(s)\n traceback.print_stack()\n if raise_assertion_error:\n raise AssertionError(s)", + "docstring": "Alternate to `` since otherwise, it is swallowed.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\utils.py", + "ast_data": "FunctionDef name:_p_assert arg:cond arg:s arg:raise_assertion_error arguments arg arg arg If Call Call If Raise Call" + }, + { + "library": "pytorch", + "name": "_is_all_annotated", + "source_code": "def _is_all_annotated(nodes: list[Node]):\n return all((_is_node_annotated(node) for node in nodes))", + "docstring": "Given a list of nodes (that represents an operator pattern), return True if all of the node is annotated, otherwise return False.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\x86_inductor_quantizer.py", + "ast_data": "FunctionDef name:_is_all_annotated arg:nodes arguments arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "fit", + "source_code": "def fit(self, x):\n x = np.asarray(x)\n if x.ndim != 2:\n raise ValueError(\"'x' must be two dimensional.\")\n if not np.allclose(np.linalg.norm(x, axis=-1), 1.0):\n msg = \"'x' must be unit vectors of norm 1 along last dimension.\"\n raise ValueError(msg)\n dim = x.shape[-1]\n dirstats = directional_stats(x)\n mu = 
dirstats.mean_direction\n r = dirstats.mean_resultant_length\n halfdim = 0.5 * dim\n\n def solve_for_kappa(kappa):\n bessel_vals = ive([halfdim, halfdim - 1], kappa)\n return bessel_vals[0] / bessel_vals[1] - r\n root_res = root_scalar(solve_for_kappa, method='brentq', bracket=(1e-08, 1000000000.0))\n kappa = root_res.root\n return (mu, kappa)", + "docstring": "Fit the von Mises-Fisher distribution to data. Parameters ---------- x : array-like Data the distribution is fitted to. Must be two dimensional. The second axis of must be unit vectors of norm 1 and determine the dimensionality of the fitted von Mises-Fisher distribution. Returns ------- mu : ndarray Estimated mean direction. kappa : float Estimated concentration parameter.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:fit arg:self arg:x arguments arg arg Assign Call If Compare Raise Call If Call Call Assign Raise Call Assign Assign Call Assign Assign Assign FunctionDef name:solve_for_kappa arg:kappa arguments arg Assign Call Return return:yes Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "get", + "source_code": "def get(self, key: str, default: Any=None) -> Any:\n return self.config.get(key, default)", + "docstring": "Return the value for ``.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py", + "ast_data": "FunctionDef name:get arg:self arg:key arg:default arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "dtype", + "source_code": "@property\ndef dtype(self):\n return self._dtype", + "docstring": "The of s handled by this .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes" + }, + { + "library": "seaborn", + "name": "PolyFit", + "source_code": "@dataclass\nclass PolyFit(Stat):\n order: int = 2\n gridsize: int = 100\n\n def _fit_predict(self, data):\n x = data['x']\n y = data['y']\n if x.nunique() <= self.order:\n xx = yy = []\n else:\n p = np.polyfit(x, y, self.order)\n xx = np.linspace(x.min(), x.max(), self.gridsize)\n yy = np.polyval(p, xx)\n return pd.DataFrame(dict(x=xx, y=yy))\n\n def __call__(self, data, groupby, orient, scales):\n return groupby.apply(data.dropna(subset=['x', 'y']), self._fit_predict)", + "docstring": "Fit a polynomial of the given order and resample data onto predicted curve.", + "type": "class", + "file_path": "seaborn\\seaborn\\_stats\\regression.py", + "ast_data": "ClassDef name:PolyFit FunctionDef name:_fit_predict arg:self arg:data arguments arg arg Assign Assign If Compare Call Assign Assign Call Assign Call Call Call Assign Call Return return:yes Call Call FunctionDef name:__call__ arg:self arg:data arg:groupby arg:orient arg:scales arguments arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "apply_sharding", + "source_code": "def apply_sharding(datapipe: DataPipe, num_of_instances: int, instance_id: int, sharding_group=SHARDING_PRIORITIES.DEFAULT) -> DataPipe:\n graph = traverse_dps(datapipe)\n\n def _helper(graph, prev_applied=None):\n for dp, sub_graph in graph.values():\n applied = None\n if _is_sharding_datapipe(dp):\n if prev_applied is not None:\n raise RuntimeError(f'Sharding twice on a single pipeline is likely unintended and will cause data loss. 
Sharding already applied to {prev_applied} while trying to apply to {dp}')\n sig = inspect.signature(dp.apply_sharding)\n if len(sig.parameters) < 3:\n dp.apply_sharding(num_of_instances, instance_id)\n else:\n dp.apply_sharding(num_of_instances, instance_id, sharding_group=sharding_group)\n applied = dp\n if applied is None:\n applied = prev_applied\n _helper(sub_graph, applied)\n _helper(graph)\n return datapipe", + "docstring": "Apply dynamic sharding over the `` are presented in the same branch.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\data\\graph_settings.py", + "ast_data": "FunctionDef name:apply_sharding arg:datapipe arg:num_of_instances arg:instance_id arg:sharding_group arguments arg arg arg arg Assign Call FunctionDef name:_helper arg:graph arg:prev_applied arguments arg arg For Call Assign If Call If Compare Raise Call Assign Call If Compare Call Call Call Assign If Compare Assign Call Call Return return:yes" + }, + { + "library": "cryptography", + "name": "value", + "source_code": "@property\n@abc.abstractmethod\ndef value(self) -> typing.Any:\n pass", + "docstring": "Return the value of the object", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\x509\\general_name.py", + "ast_data": "FunctionDef name:value arg:self arguments arg" + }, + { + "library": "scrapy", + "name": "stop", + "source_code": "def stop(self) -> Deferred[Any]:\n return self._stop()", + "docstring": "Stops simultaneously all the crawling jobs taking place. Returns a deferred that is fired when they all have ended.", + "type": "method", + "file_path": "scrapy\\scrapy\\crawler.py", + "ast_data": "FunctionDef name:stop arg:self arguments arg Return return:yes Call" + }, + { + "library": "pygame", + "name": "init", + "source_code": "def init(number_of_workers=0):\n global _wq, _use_workers\n if number_of_workers:\n _use_workers = number_of_workers\n else:\n _use_workers = benchmark_workers()\n _wq = WorkerQueue(_use_workers)", + "docstring": "Does a little test to see if threading is worth it. Sets up a global worker queue if it's worth it. Calling init() is not required, but is generally better to do.", + "type": "function", + "file_path": "pygame\\src_py\\threads\\__init__.py", + "ast_data": "FunctionDef name:init arg:number_of_workers arguments arg If Assign Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "apply", + "source_code": "def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n bound = self._model_signature.bind(*model_args, **model_kwargs)\n bound.apply_defaults()\n if bound.kwargs:\n raise ValueError('Keyword-only arguments are not supported.')\n return ((), bound.arguments)", + "docstring": "Bind the input arguments to the model signature. We hope the input kwargs will be mapped to bound.args after binding. If not, we will raise an error. Args: model_args: The model args. model_kwargs: The model kwargs. model: The PyTorch model. Returns: A tuple of the model args and kwargs. args is always empty. 
Raises: ValueError: If there are keyword-only arguments left after binding args and kwargs to model signature.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", + "ast_data": "FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Assign Call Call If Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "on_epoch_begin", + "source_code": "@doc_controls.for_subclass_implementers\ndef on_epoch_begin(self, epoch, logs=None):\n pass", + "docstring": "Called at the start of an epoch. Subclasses should override for any actions to run. This function should only be called during TRAIN mode. Args: epoch: Integer, index of epoch. logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:on_epoch_begin arg:self arg:epoch arg:logs arguments arg arg arg" + }, + { + "library": "matplotlib", + "name": "inaxes", + "source_code": "def inaxes(self, xy):\n axes_list = [a for a in self.figure.get_axes() if a.patch.contains_point(xy) and a.get_visible()]\n if axes_list:\n axes = cbook._topmost_artist(axes_list)\n else:\n axes = None\n return axes", + "docstring": "Return the topmost visible containing the point *xy*. Parameters ---------- xy : (float, float) (x, y) pixel positions from left/bottom of the canvas. Returns ------- or None The topmost visible Axes containing the point, or None if there is no Axes at the point.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:inaxes arg:self arg:xy arguments arg arg Assign Call BoolOp Call Call If Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_is_current_explicit_device", + "source_code": "def _is_current_explicit_device(device_type):\n device_type = device_type.upper()\n if device_type not in ['CPU', 'GPU']:\n raise ValueError('`device_type` should be either \"CPU\" or \"GPU\".')\n device = _get_current_tf_device()\n return device is not None and device.device_type == device_type.upper()", + "docstring": "Check if the current device is explicitly set on the device type specified. Args: device_type: A string containing or (case-insensitive). Returns: A boolean indicating if the current device scope is explicitly set on the device type. 
Raises: ValueError: If the string indicates an unsupported device.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:_is_current_explicit_device arg:device_type arguments arg Assign Call If Compare Raise Call Assign Call Return return:yes BoolOp Compare Compare Call" + }, + { + "library": "pytorch", + "name": "get_tma_workspace_arg", + "source_code": "def get_tma_workspace_arg(num_tma_descriptors: int, device: torch.device, num_programs: Optional[int]=None) -> WorkspaceArg:\n from .codegen.common import WorkspaceArg, WorkspaceZeroMode\n if num_programs is None:\n num_programs = get_num_sms()\n zero_mode = WorkspaceZeroMode.from_bool(False)\n size = num_programs * num_tma_descriptors * TMA_DESCRIPTOR_SIZE\n return WorkspaceArg(count=size, zero_mode=zero_mode, device=device, outer_name=WorkspaceArg.unique_name())", + "docstring": "Builds and returns a WorkspaceArg for the device side TMA workspace buffer.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\utils.py", + "ast_data": "FunctionDef name:get_tma_workspace_arg arg:num_tma_descriptors arg:device arg:num_programs arguments arg arg arg If Compare Assign Call Assign Call Assign Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "StyblinskiTang", + "source_code": "class StyblinskiTang(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))\n self.global_optimum = [[-2.90353401818596 for _ in range(self.N)]]\n self.fglob = -39.16616570377142 * self.N\n\n def fun(self, x, *args):\n self.nfev += 1\n return sum(x ** 4 - 16 * x ** 2 + 5 * x) / 2", + "docstring": "StyblinskiTang objective function. This class defines the Styblinski-Tang [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{StyblinskiTang}}(x) = \\sum_{i=1}^{n} \\left(x_i^4 - 16x_i^2 + 5x_i \\right) Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", + "ast_data": "ClassDef name:StyblinskiTang Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_op_node_and_weight_eq_obs", + "source_code": "def get_op_node_and_weight_eq_obs(input_eq_obs_node: Node, model: GraphModule, modules: dict[str, nn.Module]) -> tuple[Optional[Node], Optional[_WeightEqualizationObserver]]:\n op_node = None\n for user in input_eq_obs_node.users.keys():\n if node_supports_equalization(user, modules):\n op_node = user\n break\n assert op_node is not None\n if op_node.op == 'call_module':\n maybe_equalization_node_name_to_config = _get_observed_graph_module_attr(model, 'equalization_node_name_to_qconfig')\n assert maybe_equalization_node_name_to_config is not None\n equalization_node_name_to_qconfig: dict[str, Any] = maybe_equalization_node_name_to_config\n assert equalization_node_name_to_qconfig.get(op_node.name, None) is not None\n weight_eq_obs = equalization_node_name_to_qconfig.get(op_node.name, None).weight()\n assert isinstance(weight_eq_obs, _WeightEqualizationObserver)\n return (op_node, weight_eq_obs)\n elif op_node.op == 'call_function':\n weight_node = maybe_get_weight_eq_obs_node(op_node, modules)\n if weight_node is not None:\n weight_eq_obs = modules[str(weight_node.target)]\n assert isinstance(weight_eq_obs, _WeightEqualizationObserver)\n return (op_node, weight_eq_obs)\n return (None, None)", + "docstring": "Gets the following weight equalization observer. There should always exist a weight equalization observer after an input equalization observer. Returns the operation node that follows the input equalization observer node and the weight equalization observer", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", + "ast_data": "FunctionDef name:get_op_node_and_weight_eq_obs arg:input_eq_obs_node arg:model arg:modules arguments arg arg arg Assign For Call If Call Assign Compare If Compare Assign Call Compare Compare Call Assign Call Call Call Return return:yes If Compare Assign Call If Compare Assign Call Call Return return:yes Return return:no" + }, + { + "library": "pandas", + "name": "_reductions", + "source_code": "def _reductions(func: Callable, values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True, min_count: int=0, axis: AxisInt | None=None, **kwargs):\n if not skipna:\n if mask.any() or check_below_min_count(values.shape, None, min_count):\n return libmissing.NA\n else:\n return func(values, axis=axis, **kwargs)\n else:\n if check_below_min_count(values.shape, mask, min_count) and (axis is None or values.ndim == 1):\n return libmissing.NA\n if values.dtype == np.dtype(object):\n values = values[~mask]\n return func(values, axis=axis, **kwargs)\n return func(values, where=~mask, axis=axis, **kwargs)", + "docstring": "Sum, mean or product for 1D masked array. Parameters ---------- func : np.sum or np.prod values : np.ndarray Numpy array with the values (can be of any dtype that support the operation). mask : np.ndarray[bool] Boolean numpy array (True values indicate missing values). skipna : bool, default True Whether to skip NA. 
min_count : int, default 0 The required number of valid values to perform the operation. If fewer than `` non-NA values are present the result will be NA. axis : int, optional, default None", + "type": "function", + "file_path": "pandas\\pandas\\core\\array_algos\\masked_reductions.py", + "ast_data": "FunctionDef name:_reductions arg:func arg:values arg:mask arguments arg arg arg arg arg arg arg If If BoolOp Call Call Return return:yes Return return:yes Call If BoolOp Call BoolOp Compare Compare Return return:yes If Compare Call Assign Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "RelaxedBernoulli", + "source_code": "class RelaxedBernoulli(TransformedDistribution):\n arg_constraints = {'probs': constraints.unit_interval, 'logits': constraints.real}\n support = constraints.unit_interval\n has_rsample = True\n base_dist: LogitRelaxedBernoulli\n\n def __init__(self, temperature: Tensor, probs: Optional[Union[Tensor, Number]]=None, logits: Optional[Union[Tensor, Number]]=None, validate_args: Optional[bool]=None) -> None:\n base_dist = LogitRelaxedBernoulli(temperature, probs, logits)\n super().__init__(base_dist, SigmoidTransform(), validate_args=validate_args)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(RelaxedBernoulli, _instance)\n return super().expand(batch_shape, _instance=new)\n\n @property\n def temperature(self) -> Tensor:\n return self.base_dist.temperature\n\n @property\n def logits(self) -> Tensor:\n return self.base_dist.logits\n\n @property\n def probs(self) -> Tensor:\n return self.base_dist.probs", + "docstring": "Creates a RelaxedBernoulli distribution, parametrized by :attr:, and either :attr: or :attr: (but not both). This is a relaxed version of the distribution, so the values are in (0, 1), and has reparametrizable samples. Example:: >>> # xdoctest: +IGNORE_WANT(\"non-deterministic\") >>> m = RelaxedBernoulli(torch.tensor([2.2]), ... torch.tensor([0.1, 0.2, 0.3, 0.99])) >>> m.sample() tensor([ 0.2951, 0.3442, 0.8918, 0.9021]) Args: temperature (Tensor): relaxation temperature probs (Number, Tensor): the probability of sampling logits (Number, Tensor): the log-odds of sampling", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\relaxed_bernoulli.py", + "ast_data": "ClassDef name:RelaxedBernoulli Assign Assign Assign FunctionDef name:__init__ arg:self arg:temperature arg:probs arg:logits arg:validate_args arguments arg arg arg arg arg Assign Call Call Call Call FunctionDef name:expand arg:self arg:batch_shape arg:_instance arguments arg arg arg Assign Call Return return:yes Call Call FunctionDef name:temperature arg:self arguments arg Return return:yes FunctionDef name:logits arg:self arguments arg Return return:yes FunctionDef name:probs arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "__call__", + "source_code": "def __call__(self, x, pos=None):\n if len(self.locs) == 0 or self.offset == 0:\n return self.fix_minus(self.format_data(x))\n else:\n xp = (x - self.offset) / 10.0 ** self.orderOfMagnitude\n if abs(xp) < 1e-08:\n xp = 0\n return self._format_maybe_minus_and_locale(self.format, xp)", + "docstring": "Return the format for tick value *x* at position *pos*. 
If there is no currently offset in the data, it returns the best engineering formatting that fits the given argument, independently.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg If BoolOp Compare Call Compare Return return:yes Call Call Assign If Compare Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_call_hook_before_run", + "source_code": "def _call_hook_before_run(self, run_context, fetch_dict, user_feed_dict, options):\n hook_feeds = {}\n for hook in self._hooks:\n request = hook.before_run(run_context)\n if request is not None:\n if request.fetches is not None:\n fetch_dict[hook] = request.fetches\n if request.feed_dict:\n self._raise_if_feeds_intersects(hook_feeds, request.feed_dict, 'Same tensor is fed by two hooks.')\n hook_feeds.update(request.feed_dict)\n if request.options:\n self._merge_run_options(options, request.options)\n if not hook_feeds:\n return user_feed_dict\n if not user_feed_dict:\n return hook_feeds\n self._raise_if_feeds_intersects(user_feed_dict, hook_feeds, 'Same tensor is fed by a SessionRunHook and user.')\n hook_feeds.update(user_feed_dict)\n return hook_feeds", + "docstring": "Calls hooks.before_run and handles requests from hooks.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", + "ast_data": "FunctionDef name:_call_hook_before_run arg:self arg:run_context arg:fetch_dict arg:user_feed_dict arg:options arguments arg arg arg arg arg Assign For Assign Call If Compare If Compare Assign If Call Call If Call If Return return:yes If Return return:yes Call Call Return return:yes" + }, + { + "library": "django", + "name": "ewkb", + "source_code": "@property\ndef ewkb(self):\n return ewkb_w(3 if self.hasz else 2).write(self)", + "docstring": "Return the EWKB representation of this Geometry as a Python memoryview. This is an extension of the WKB specification that includes any SRID value that are a part of this geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:ewkb arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_create_or_restore_slot_variable", + "source_code": "def _create_or_restore_slot_variable(self, slot_variable_position, slot_name, variable):\n named_slots = self._slot_dict(slot_name)\n variable_key = _var_key(variable)\n slot_variable = named_slots.get(variable_key, None)\n if slot_variable is None and context.executing_eagerly() and slot_variable_position.is_simple_variable() and (not ops.get_default_graph()._variable_creator_stack):\n initializer = trackable.CheckpointInitialValueCallable(checkpoint_position=slot_variable_position)\n slot_variable = self._get_or_make_slot_with_initializer(var=variable, initializer=initializer, shape=variable.shape, dtype=variable.dtype, slot_name=slot_name, op_name=self._name)\n if slot_variable is not None:\n slot_variable_position.restore(slot_variable)\n else:\n self._deferred_slot_restorations.setdefault(slot_name, {}).setdefault(variable_key, []).append(slot_variable_position)", + "docstring": "Restore a slot variable's value, possibly creating it. Called when a variable which has an associated slot variable is created or restored. When executing eagerly, we create the slot variable with a restoring initializer. No new variables are created when graph building. 
Instead, _restore_slot_variable catches these after normal creation and adds restore ops to the graph. This method is nonetheless important when graph building for the case when a slot variable has already been created but has just been added to a dependency graph (causing us to realize that the slot variable needs to be restored). Args: slot_variable_position: A object indicating the slot variable object to be restored. slot_name: The name of this 's slot to restore into. variable: The variable object this slot is being created for.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_create_or_restore_slot_variable arg:self arg:slot_variable_position arg:slot_name arg:variable arguments arg arg arg arg Assign Call Assign Call Assign Call If BoolOp Compare Call Call Call Assign Call Assign Call If Compare Call Call Call Call" + }, + { + "library": "pandas", + "name": "__array__", + "source_code": "def __array__(self, dtype=None, copy=None) -> np.ndarray:\n if copy is False:\n raise ValueError('Unable to avoid copy while creating an array as requested.')\n if copy is True:\n return np.array(self.values, dtype=dtype)\n return self.values", + "docstring": "the array interface, return my values", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:__array__ arg:self arg:dtype arg:copy arguments arg arg arg If Compare Raise Call If Compare Return return:yes Call Return return:yes" + }, + { + "library": "scipy", + "name": "_rotate_samples", + "source_code": "def _rotate_samples(self, samples, mu, dim):\n base_point = np.zeros((dim,))\n base_point[0] = 1.0\n embedded = np.concatenate([mu[None, :], np.zeros((dim - 1, dim))])\n rotmatrix, _ = np.linalg.qr(np.transpose(embedded))\n if np.allclose(np.matmul(rotmatrix, base_point[:, None])[:, 0], mu):\n rotsign = 1\n else:\n rotsign = -1\n samples = np.einsum('ij,...j->...i', rotmatrix, samples) * rotsign\n return samples", + "docstring": "A QR decomposition is used to find the rotation that maps the north pole (1, 0,...,0) to the vector mu. This rotation is then applied to all samples. Parameters ---------- samples: array_like, shape = [..., n] mu : array-like, shape=[n, ] Point to parametrise the rotation. Returns ------- samples : rotated samples", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:_rotate_samples arg:self arg:samples arg:mu arg:dim arguments arg arg arg arg Assign Call Assign Assign Call Call Assign Call Call If Call Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "hash_configs", + "source_code": "def hash_configs(configs: list[Config]):\n hasher = hashlib.sha256()\n for cfg in configs:\n hasher.update(f'{sorted(cfg.kwargs.items())} {cfg.num_warps} {cfg.num_stages}\\n'.encode())\n return hasher.hexdigest()", + "docstring": "Hash used to check for changes in configurations", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py", + "ast_data": "FunctionDef name:hash_configs arg:configs arguments arg Assign Call For Call Call Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "get_left", + "source_code": "def get_left(self):\n return self.left", + "docstring": "Return a reference to the left child tree object. Returns ------- left : ClusterNode The left child of the target node. 
If the node is a leaf, None is returned.", + "type": "method", + "file_path": "scipy\\scipy\\cluster\\hierarchy.py", + "ast_data": "FunctionDef name:get_left arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "welsch_loss", + "source_code": "def welsch_loss(img1: Tensor, img2: Tensor, reduction: str='none') -> Tensor:\n KORNIA_CHECK_IS_TENSOR(img1)\n KORNIA_CHECK_IS_TENSOR(img2)\n KORNIA_CHECK_SAME_SHAPE(img1, img2)\n KORNIA_CHECK_SAME_DEVICE(img1, img2)\n KORNIA_CHECK(reduction in ('mean', 'sum', 'none'), f'Given type of reduction is not supported. Got: {reduction}')\n loss = 1.0 - (-0.5 * (img1 - img2) ** 2).exp()\n if reduction == 'mean':\n loss = loss.mean()\n elif reduction == 'sum':\n loss = loss.sum()\n elif reduction == 'none':\n pass\n else:\n raise NotImplementedError('Invalid reduction option.')\n return loss", + "docstring": "Criterion that computes the Welsch [2] (aka. Leclerc [3]) loss. According to [1], we compute the Welsch loss as follows: .. math:: \\text{WL}(x, y) = 1 - exp(-\\frac{1}{2} (x - y)^{2}) Where: - :math: is the prediction. - :math: is the target to be regressed to. Reference: [1] [2] [3] Args: img1: the predicted tensor with shape :math:. img2: the target tensor with the same shape as img1. reduction: Specifies the reduction to apply to the output: ``: the output will be summed. Return: a scalar with the computed loss. Example: >>> img1 = torch.randn(2, 3, 32, 32, requires_grad=True) >>> img2 = torch.randn(2, 3, 32, 32) >>> output = welsch_loss(img1, img2, reduction=\"mean\") >>> output.backward()", + "type": "function", + "file_path": "kornia\\kornia\\losses\\welsch.py", + "ast_data": "FunctionDef name:welsch_loss arg:img1 arg:img2 arg:reduction arguments arg arg arg Call Call Call Call Call Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "kornia", + "name": "pad", + "source_code": "def pad(self, padding_size: Tensor) -> 'Keypoints':\n if not (len(padding_size.shape) == 2 and padding_size.size(1) == 4):\n raise RuntimeError(f'Expected padding_size as (B, 4). Got {padding_size.shape}.')\n self._data[..., 0] += padding_size[..., :1]\n self._data[..., 1] += padding_size[..., 2:3]\n return self", + "docstring": "Pad a bounding keypoints. Args: padding_size: (B, 4)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\keypoints.py", + "ast_data": "FunctionDef name:pad arg:self arg:padding_size arguments arg arg If BoolOp Compare Call Compare Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "apply_gradients", + "source_code": "def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n if distribute_lib.in_cross_replica_context():\n raise ValueError('apply_gradients() must be called in a replica context.')\n if not self._doing_dynamic_loss_scaling():\n return self._optimizer.apply_gradients(grads_and_vars, global_step, name)\n replica_context = distribute_lib.get_replica_context()\n grads_and_vars = tuple(grads_and_vars)\n return replica_context.merge_call(self._distributed_apply, args=(grads_and_vars, global_step, name))", + "docstring": "Apply gradients to variables. This is the second part of . It returns an that conditionally applies gradients if all gradient values are finite. Otherwise no update is performed (nor is incremented). Args: grads_and_vars: List of (gradient, variable) pairs as returned by . global_step: Optional to increment by one after the variables have been updated. 
name: Optional name for the returned operation. Default to the name passed to the constructor. Returns: An that conditionally applies the specified gradients. If was not None, that operation also increments . Raises: RuntimeError: If you should use instead.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:apply_gradients arg:self arg:grads_and_vars arg:global_step arg:name arguments arg arg arg arg If Call Raise Call If Call Return return:yes Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "test_db_signature", + "source_code": "def test_db_signature(self):\n settings_dict = self.connection.settings_dict\n return (settings_dict['HOST'], settings_dict['PORT'], settings_dict['ENGINE'], self._get_test_db_name())", + "docstring": "Return a tuple with elements of self.connection.settings_dict (a DATABASES setting value) that uniquely identify a database accordingly to the RDBMS particularities.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\creation.py", + "ast_data": "FunctionDef name:test_db_signature arg:self arguments arg Assign Return return:yes Call" + }, + { + "library": "kornia", + "name": "RgbToLab", + "source_code": "class RgbToLab(Module):\n ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n def forward(self, image: torch.Tensor) -> torch.Tensor:\n return rgb_to_lab(image)", + "docstring": "Convert an image from RGB to Lab. The image data is assumed to be in the range of :math:. Lab color is computed using the D65 illuminant and Observer 2. Returns: Lab version of the image. Shape: - image: :math: - output: :math: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> lab = RgbToLab() >>> output = lab(input) # 2x3x4x5 Reference: [1] [2] [3]", + "type": "class", + "file_path": "kornia\\kornia\\color\\lab.py", + "ast_data": "ClassDef name:RgbToLab FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call" + }, + { + "library": "authlib", + "name": "rsa_sha1_signature", + "source_code": "def rsa_sha1_signature(base_string, rsa_private_key):\n from .rsa import sign_sha1\n base_string = to_bytes(base_string)\n s = sign_sha1(to_bytes(base_string), rsa_private_key)\n sig = binascii.b2a_base64(s)[:-1]\n return to_unicode(sig)", + "docstring": "Generate signature via RSA-SHA1 method, per _. The \"RSA-SHA1\" signature method uses the RSASSA-PKCS1-v1_5 signature algorithm as defined in _ (also known as PKCS#1), using SHA-1 as the hash function for EMSA-PKCS1-v1_5. To use this method, the client MUST have established client credentials with the server that included its RSA public key (in a manner that is beyond the scope of this specification). .. _: .. _:", + "type": "function", + "file_path": "authlib\\authlib\\oauth1\\rfc5849\\signature.py", + "ast_data": "FunctionDef name:rsa_sha1_signature arg:base_string arg:rsa_private_key arguments arg arg Assign Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "in_y_pred_range", + "source_code": "def in_y_pred_range(self, y):\n return self.interval_y_pred.includes(y)", + "docstring": "Return True if y is in the valid range of y_pred. 
Parameters ---------- y : ndarray", + "type": "method", + "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", + "ast_data": "FunctionDef name:in_y_pred_range arg:self arg:y arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "as_sql", + "source_code": "def as_sql(self, compiler, connection):\n raise NotImplementedError('Subclasses must implement as_sql()')", + "docstring": "Responsible for returning a (sql, [params]) tuple to be included in the current query. Different backends can provide their own implementation, by providing an method and patching the Expression: Arguments: * compiler: the query compiler responsible for generating the query. Must have a compile method, returning a (sql, [params]) tuple. Calling compiler(value) will return a quoted . * connection: the database connection used for the current query. Return: (sql, params) Where is a string containing ordered sql parameters to be replaced with the elements of the list .", + "type": "method", + "file_path": "django\\django\\db\\models\\expressions.py", + "ast_data": "FunctionDef name:as_sql arg:self arg:compiler arg:connection arguments arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "converted_self", + "source_code": "def converted_self(self):\n raise NotImplementedError", + "docstring": "A copy of this Convertible to be modified during conversion. Returns: Implementations should return the copied instance, which in turn should be contained in converted_enclosing_graph(). This instance is the one that will be modified during conversion. Its main use will be in the implementations of convert_variable_to_constant().", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", + "ast_data": "FunctionDef name:converted_self arg:self arguments arg Raise" + }, + { + "library": "pytorch", + "name": "_generate_qconfig_mapping_helper", + "source_code": "def _generate_qconfig_mapping_helper(self, detector_qconfig_info_combined: dict[str, DetectorQConfigInfo], generation_function: Callable) -> QConfigMapping:\n qconfig_mapping = QConfigMapping()\n for fqn, module in self._model.named_modules():\n if fqn in detector_qconfig_info_combined:\n qconfig_info_compiled = detector_qconfig_info_combined[fqn]\n generated_qconfig = generation_function(qconfig_info_compiled, module)\n qconfig_mapping.set_module_name(fqn, generated_qconfig)\n return qconfig_mapping", + "docstring": "This helper takes in the compiled detector qconfig info that has been compiled together and merges it into a QConfigMapping", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py", + "ast_data": "FunctionDef name:_generate_qconfig_mapping_helper arg:self arg:detector_qconfig_info_combined arg:generation_function arguments arg arg arg Assign Call For Call If Compare Assign Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "get_chunks", + "source_code": "def get_chunks(self, n_chunks: int | None=None):\n if n_chunks and n_chunks > 1:\n size = len(self._col)\n step = size // n_chunks\n if size % n_chunks != 0:\n step += 1\n for start in range(0, step * n_chunks, step):\n yield PandasColumn(self._col.iloc[start:start + step], self._allow_copy)\n else:\n yield self", + "docstring": "Return an iterator yielding the chunks. 
See for details on ``.", + "type": "method", + "file_path": "pandas\\pandas\\core\\interchange\\column.py", + "ast_data": "FunctionDef name:get_chunks arg:self arg:n_chunks arguments arg arg If BoolOp Compare Assign Call Assign If Compare For Call Call" + }, + { + "library": "scikit-learn", + "name": "_changed_params", + "source_code": "def _changed_params(estimator):\n params = estimator.get_params(deep=False)\n init_func = getattr(estimator.__init__, 'deprecated_original', estimator.__init__)\n init_params = inspect.signature(init_func).parameters\n init_params = {name: param.default for name, param in init_params.items()}\n\n def has_changed(k, v):\n if k not in init_params:\n return True\n if init_params[k] == inspect._empty:\n return True\n if isinstance(v, BaseEstimator) and v.__class__ != init_params[k].__class__:\n return True\n if repr(v) != repr(init_params[k]) and (not (is_scalar_nan(init_params[k]) and is_scalar_nan(v))):\n return True\n return False\n return {k: v for k, v in params.items() if has_changed(k, v)}", + "docstring": "Return dict (param_name: value) of parameters that were given to estimator with non-default values.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_pprint.py", + "ast_data": "FunctionDef name:_changed_params arg:estimator arguments arg Assign Call Assign Call Assign Call Assign Call FunctionDef name:has_changed arg:k arg:v arguments arg arg If Compare Return return:yes If Compare Return return:yes If BoolOp Call Compare Return return:yes If BoolOp Compare Call Call BoolOp Call Call Return return:yes Return return:yes Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "set_last_checkpoints_with_time", + "source_code": "def set_last_checkpoints_with_time(self, last_checkpoints_with_time):\n assert isinstance(last_checkpoints_with_time, list)\n self._last_checkpoints = last_checkpoints_with_time", + "docstring": "Sets the list of old checkpoint filenames and timestamps. Args: last_checkpoints_with_time: A list of tuples of checkpoint filenames and timestamps. Raises: AssertionError: If last_checkpoints_with_time is not a list.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", + "ast_data": "FunctionDef name:set_last_checkpoints_with_time arg:self arg:last_checkpoints_with_time arguments arg arg Call Assign" + }, + { + "library": "tensorflow", + "name": "add_queue_runner", + "source_code": "@tf_export(v1=['train.queue_runner.add_queue_runner', 'train.add_queue_runner'])\n@deprecation.deprecated(None, _DEPRECATION_INSTRUCTION)\ndef add_queue_runner(qr, collection=ops.GraphKeys.QUEUE_RUNNERS):\n ops.add_to_collection(collection, qr)", + "docstring": "Adds a to a collection in the graph. When building a complex model that uses many queues it is often difficult to gather all the queue runners that need to be run. This convenience function allows you to add a queue runner to a well known collection in the graph. The companion method can be used to start threads for all the collected queue runners. @compatibility(TF2) QueueRunners are not compatible with eager execution. Instead, please use [tf.data]( to get data into your model. @end_compatibility Args: qr: A . collection: A specifying the graph collection to add the queue runner to. 
Defaults to .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\queue_runner_impl.py", + "ast_data": "FunctionDef name:add_queue_runner arg:qr arg:collection arguments arg arg Call Call Call" + }, + { + "library": "sphinx", + "name": "_relative_path", + "source_code": "def _relative_path(path: Path, root: Path, /) -> Path:\n if '..' in path.parts:\n path = path.resolve()\n if '..' in root.parts:\n root = root.resolve()\n if path.anchor != root.anchor or '..' in root.parts:\n return path\n if sys.version_info[:2] < (3, 12):\n return Path(os.path.relpath(path, root))\n return path.relative_to(root, walk_up=True)", + "docstring": "Return a relative filepath to *path* from the given *root* directory. This is an alternative of ``. It returns the original path if *path* and *root* are on different drives, which may happen on Windows.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\osutil.py", + "ast_data": "FunctionDef name:_relative_path arguments arg arg If Compare Assign Call If Compare Assign Call If BoolOp Compare Compare Return return:yes If Compare Return return:yes Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_config", + "source_code": "def get_config(self):\n return {}", + "docstring": "Returns the configuration of the initializer as a JSON-serializable dict. Returns: A JSON-serializable Python dict.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "FunctionDef name:get_config arg:self arguments arg Return return:no" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, checkpoint_dir: Text, save_secs: Optional[int]=None, save_steps: Optional[int]=None, saver: Optional[saver_lib.Saver]=None, checkpoint_basename: Text='model.ckpt', scaffold: Optional[monitored_session.Scaffold]=None, listeners: Optional[List[basic_session_run_hooks.CheckpointSaverListener]]=None):\n save_path = os.path.join(checkpoint_dir, checkpoint_basename)\n logging.info('Create AsyncCheckpointSaverHook saving to path\\n%s', save_path)\n if listeners:\n logging.info(' with %d listener(s).', len(listeners))\n if saver is not None and scaffold is not None:\n raise ValueError('You cannot provide both saver and scaffold.')\n self._saver = saver\n self._save_thread = None\n self._write_graph_thread = None\n self._checkpoint_dir = checkpoint_dir\n self._save_path = save_path\n self._scaffold = scaffold\n self._timer = basic_session_run_hooks.SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps)\n self._listeners = listeners or []\n self._steps_per_run = 1\n self._summary_writer = None\n self._global_step_tensor = None\n self._last_checkpoint_step = None\n global _END_TIME_OF_LAST_WRITE\n with _END_TIME_OF_LAST_WRITE_LOCK:\n if _END_TIME_OF_LAST_WRITE is None:\n _END_TIME_OF_LAST_WRITE = time.time()", + "docstring": "Initializes a . Args: checkpoint_dir: , base directory for the checkpoint files. save_secs: , save every N secs. save_steps: , save every N steps. saver: object, used for saving. checkpoint_basename: , base name for the checkpoint files. scaffold: , use to get saver object. listeners: List of subclass instances. Used for callbacks that run immediately before or after this hook saves the checkpoint. Raises: ValueError: One of or should be set. 
ValueError: At most one of or should be set.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\async_checkpoint.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:checkpoint_dir arg:save_secs arg:save_steps arg:saver arg:checkpoint_basename arg:scaffold arg:listeners arguments arg arg arg arg arg arg arg arg Assign Call Call If Call Call If BoolOp Compare Compare Raise Call Assign Assign Assign Assign Assign Assign Assign Call Assign BoolOp Assign Assign Assign Assign With If Compare Assign Call" + }, + { + "library": "seaborn", + "name": "SemanticMapping", + "source_code": "class SemanticMapping:\n map_type: str | None = None\n levels = None\n lookup_table = None\n\n def __init__(self, plotter):\n self.plotter = plotter\n\n def _check_list_length(self, levels, values, variable):\n message = ''\n if len(levels) > len(values):\n message = ' '.join([f'\\nThe {variable} list has fewer values ({len(values)})', f'than needed ({len(levels)}) and will cycle, which may', 'produce an uninterpretable plot.'])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n elif len(values) > len(levels):\n message = ' '.join([f'The {variable} list has more values ({len(values)})', f'than needed ({len(levels)}), which may not be intended.'])\n values = values[:len(levels)]\n if message:\n warnings.warn(message, UserWarning, stacklevel=6)\n return values\n\n def _lookup_single(self, key):\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)", + "docstring": "Base class for mapping data values to plot attributes.", + "type": "class", + "file_path": "seaborn\\seaborn\\_base.py", + "ast_data": "ClassDef name:SemanticMapping Assign Assign FunctionDef name:__init__ arg:self arg:plotter arguments arg arg Assign FunctionDef name:_check_list_length arg:self arg:levels arg:values arg:variable arguments arg arg arg arg Assign If Compare Call Call Assign Call Call Call Assign Call Call If Compare Call Call Assign Call Call Call Assign Call If Call Return return:yes FunctionDef name:_lookup_single arg:self arg:key arguments arg arg Return return:yes FunctionDef name:__call__ arg:self arg:key arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "monotone_map", + "source_code": "@staticmethod\ndef monotone_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR:\n x = ValueRanges.wrap(x)\n l = fn(x.lower)\n u = fn(x.upper)\n return ValueRanges(min(l, u), max(l, u))", + "docstring": "It's increasing or decreasing.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\_sympy\\value_ranges.py", + "ast_data": "FunctionDef name:monotone_map arg:x arg:fn arguments arg arg Assign Call Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_prepare_replacement", + "source_code": "def _prepare_replacement(self, replaced, key):\n repl = self.replacements[key]\n new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos)\n if isinstance(new_nodes, gast.AST):\n new_nodes = [new_nodes]\n return new_nodes", + "docstring": "Prepares a replacement AST that's safe to swap in for a node. 
Args: replaced: ast.AST, the node being replaced key: Hashable, the key of the replacement AST Returns: ast.AST, the replacement AST", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\templates.py", + "ast_data": "FunctionDef name:_prepare_replacement arg:self arg:replaced arg:key arguments arg arg arg Assign Assign Call If Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "simple_floordiv_gcd", + "source_code": "def simple_floordiv_gcd(p: sympy.Basic, q: sympy.Basic) -> sympy.Basic:\n\n def integer_coefficient(x: sympy.Basic) -> int:\n integer_coefficients: list[int] = [abs(int(arg)) for arg in sympy.Mul.make_args(x) if isinstance(arg, (int, sympy.Integer))]\n return math.prod(integer_coefficients)\n\n def integer_factor(expr: sympy.Basic) -> int:\n integer_factors: Iterable[int] = map(integer_coefficient, sympy.Add.make_args(expr))\n return functools.reduce(math.gcd, integer_factors)\n gcd: int = math.gcd(integer_factor(p), integer_factor(q))\n p, q = (p / gcd, q / gcd)\n base_splits: list[tuple[sympy.Basic, ...]] = list(map(sympy.Mul.make_args, sympy.Add.make_args(p)))\n divisor_split: tuple[sympy.Basic, ...] = sympy.Mul.make_args(q)\n for x in divisor_split:\n if all((x in base_split for base_split in base_splits)):\n gcd = gcd * x\n return gcd", + "docstring": "Fast path for sympy.gcd, using a simple factoring strategy. We try to rewrite p and q in the form n*e*p1 + n*e*p2 and n*e*q0, where n is the greatest common integer factor and e is the largest syntactic common factor (i.e., common sub-expression) in p and q. Then the gcd returned is n*e, cancelling which we would be left with p1 + p2 and q0. Note that further factoring of p1 + p2 and q0 might be possible with sympy.factor (which uses domain-specific theories). E.g., we are unable to find that x*y + x + y + 1 is divisible by x + 1. 
More generally, when q is of the form q1 + q2 (instead of being already factored) it might be necessary to fall back on sympy.gcd.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\_sympy\\functions.py", + "ast_data": "FunctionDef name:simple_floordiv_gcd arg:p arg:q arguments arg arg FunctionDef name:integer_coefficient arg:x arguments arg Call Call Call Call Return return:yes Call FunctionDef name:integer_factor arg:expr arguments arg Call Call Return return:yes Call Call Call Call Assign Call Call Call Call For If Call Compare Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "calc_package", + "source_code": "def calc_package(self):\n package = self.f_globals.get('__package__')\n spec = self.f_globals.get('__spec__')\n if package is not None:\n if spec is not None and package != spec.parent:\n log.warning('__package__ != __spec__.parent (%r != %r)', package, spec.parent, stacklevel=3)\n return package\n elif spec is not None:\n return spec.parent\n else:\n log.warning(\"can't resolve package from __spec__ or __package__, falling back on __name__ and __path__\", stacklevel=3)\n package = self.f_globals['__name__']\n if '__path__' not in self.f_globals:\n package = package.rpartition('.')[0]\n return package", + "docstring": "Copied from the Cpython implementation of __import__", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py", + "ast_data": "FunctionDef name:calc_package arg:self arguments arg Assign Call Assign Call If Compare If BoolOp Compare Compare Call Return return:yes If Compare Return return:yes Call Assign If Compare Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "chebgauss", + "source_code": "def chebgauss(deg):\n ideg = pu._as_int(deg, 'deg')\n if ideg <= 0:\n raise ValueError('deg must be a positive integer')\n x = np.cos(np.pi * np.arange(1, 2 * ideg, 2) / (2.0 * ideg))\n w = np.ones(ideg) * (np.pi / ideg)\n return (x, w)", + "docstring": "Gauss-Chebyshev quadrature. Computes the sample points and weights for Gauss-Chebyshev quadrature. These sample points and weights will correctly integrate polynomials of degree :math: or less over the interval :math: with the weight function :math:. Parameters ---------- deg : int Number of sample points and weights. It must be >= 1. Returns ------- x : ndarray 1-D ndarray containing the sample points. y : ndarray 1-D ndarray containing the weights. Notes ----- The results have only been tested up to degree 100, higher degrees may be problematic. For Gauss-Chebyshev there are closed form solutions for the sample points and weights. If n = , then .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n)) .. math:: w_i = \\pi / n", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\chebyshev.py", + "ast_data": "FunctionDef name:chebgauss arg:deg arguments arg Assign Call If Compare Raise Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "to_numpy_code", + "source_code": "def to_numpy_code(code):\n code = code.lower()\n if code is None:\n return native_code\n if code in aliases['little']:\n return '<'\n elif code in aliases['big']:\n return '>'\n elif code in aliases['native']:\n return native_code\n elif code in aliases['swapped']:\n return swapped_code\n else:\n raise ValueError(f'We cannot handle byte order {code}')", + "docstring": "Convert various order codings to NumPy format. Parameters ---------- code : str The code to convert. It is converted to lower case before parsing. 
Legal values are: 'little', 'big', 'l', 'b', 'le', 'be', '', 'native', '=', 'swapped', 's'. Returns ------- out_code : {''} Here '' is the code for big endian. Examples -------- >>> import sys >>> from scipy.io.matlab._byteordercodes import to_numpy_code >>> sys_is_le = (sys.byteorder == 'little') >>> sys_is_le True >>> to_numpy_code('big') '>' >>> to_numpy_code('little') '>> nc = to_numpy_code('native') >>> nc == '' True >>> sc = to_numpy_code('swapped') >>> sc == '>' if sys_is_le else sc == '<' True", + "type": "function", + "file_path": "scipy\\scipy\\io\\matlab\\_byteordercodes.py", + "ast_data": "FunctionDef name:to_numpy_code arg:code arguments arg Assign Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call" + }, + { + "library": "matplotlib", + "name": "_preprocess_math", + "source_code": "def _preprocess_math(self, s):\n if self.get_usetex():\n if s == ' ':\n s = '\\\\ '\n return (s, 'TeX')\n elif not self.get_parse_math():\n return (s, False)\n elif cbook.is_math_text(s):\n return (s, True)\n else:\n return (s.replace('\\\\$', '$'), False)", + "docstring": "Return the string *s* after mathtext preprocessing, and the kind of mathtext support needed. - If *self* is configured to use TeX, return *s* unchanged except that a single space gets escaped, and the flag \"TeX\". - Otherwise, if *s* is mathtext (has an even number of unescaped dollar signs) and `` is not set to False, return *s* and the flag True. - Otherwise, return *s* with dollar signs unescaped, and the flag False.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:_preprocess_math arg:self arg:s arguments arg arg If Call If Compare Assign Return return:yes If Call Return return:yes If Call Return return:yes Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_matrix", + "source_code": "def get_matrix(self):\n return self.get_affine().get_matrix()", + "docstring": "Get the matrix for the affine part of this transform.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:get_matrix arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "rvs", + "source_code": "def rvs(self, mu=None, kappa=1, size=1, random_state=None):\n dim, mu, kappa = self._process_parameters(mu, kappa)\n random_state = self._get_random_state(random_state)\n samples = self._rvs(dim, mu, kappa, size, random_state)\n return samples", + "docstring": "Draw random samples from a von Mises-Fisher distribution. Parameters ---------- mu : array_like Mean direction of the distribution. Must be a one-dimensional unit vector of norm 1. kappa : float Concentration parameter. Must be positive. size : int or tuple of ints, optional Given a shape of, for example, (m,n,k), m*n*k samples are generated, and packed in an m-by-n-by-k arrangement. Because each sample is N-dimensional, the output shape is (m,n,k,N). If no shape is specified, a single (N-D) sample is returned. random_state : {None, int, np.random.RandomState, np.random.Generator}, optional Used for drawing random variates. If is , the singleton is used. 
If is an int, a new `seedNonesizeNN` is the dimension of the distribution.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:rvs arg:self arg:mu arg:kappa arg:size arg:random_state arguments arg arg arg arg arg Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "as_symbol", + "source_code": "def as_symbol(obj):\n return Expr(Op.SYMBOL, obj)", + "docstring": "Return object as SYMBOL expression (variable or unparsed expression).", + "type": "function", + "file_path": "numpy\\numpy\\f2py\\symbolic.py", + "ast_data": "FunctionDef name:as_symbol arg:obj arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "_random_lhs", + "source_code": "def _random_lhs(self, n: IntNumber=1) -> np.ndarray:\n if not self.scramble:\n samples: np.ndarray | float = 0.5\n else:\n samples = self.rng.uniform(size=(n, self.d))\n perms = np.tile(np.arange(1, n + 1), (self.d, 1))\n for i in range(self.d):\n self.rng.shuffle(perms[i, :])\n perms = perms.T\n samples = (perms - samples) / n\n return samples", + "docstring": "Base LHS algorithm.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_qmc.py", + "ast_data": "FunctionDef name:_random_lhs arg:self arg:n arguments arg arg If Assign Call Assign Call Call For Call Call Assign Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "fit_transform", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit_transform(self, X, y=None, init=None):\n if self.n_init == 'warn':\n warnings.warn('The default value of `n_init` will change from 4 to 1 in 1.9.', FutureWarning)\n self._n_init = 4\n else:\n self._n_init = self.n_init\n X = validate_data(self, X)\n if X.shape[0] == X.shape[1] and self.dissimilarity != 'precomputed':\n warnings.warn(\"The MDS API has changed. ``fit`` now constructs a dissimilarity matrix from data. To use a custom dissimilarity matrix, set ``dissimilarity='precomputed'``.\")\n if self.dissimilarity == 'precomputed':\n self.dissimilarity_matrix_ = X\n elif self.dissimilarity == 'euclidean':\n self.dissimilarity_matrix_ = euclidean_distances(X)\n self.embedding_, self.stress_, self.n_iter_ = smacof(self.dissimilarity_matrix_, metric=self.metric, n_components=self.n_components, init=init, n_init=self._n_init, n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose, eps=self.eps, random_state=self.random_state, return_n_iter=True, normalized_stress=self.normalized_stress)\n return self.embedding_", + "docstring": "Fit the data from , and returns the embedded coordinates. Parameters ---------- X : array-like of shape (n_samples, n_features) or (n_samples, n_samples) Input data. If ``, the input should be the dissimilarity matrix. y : Ignored Not used, present for API consistency by convention. init : ndarray of shape (n_samples, n_components), default=None Starting configuration of the embedding to initialize the SMACOF algorithm. By default, the algorithm is initialized with a randomly chosen array. 
Returns ------- X_new : ndarray of shape (n_samples, n_components) X transformed in the new space.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\manifold\\_mds.py", + "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arg:init arguments arg arg arg arg If Compare Call Assign Assign Assign Call If BoolOp Compare Compare Call If Compare Assign If Compare Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_UnsortedSegmentSumGrad", + "source_code": "@ops.RegisterGradient('UnsortedSegmentSum')\ndef _UnsortedSegmentSumGrad(op: ops.Operation, grad):\n return (_GatherDropNegatives(grad, op.inputs[1])[0], None, None)", + "docstring": "Gradient for UnsortedSegmentSum.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_UnsortedSegmentSumGrad arg:op arg:grad arguments arg arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "__init__", + "source_code": "def __init__(self, critical_value):\n self.critical_value = critical_value", + "docstring": "DomainGreater(v)(x) = true where x <= v", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:critical_value arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "_assert_positive_definite", + "source_code": "def _assert_positive_definite(self):\n logging.warn('Using (possibly slow) default implementation of assert_positive_definite. Requires conversion to a dense matrix and O(N^3) operations.')\n if self.is_self_adjoint:\n return check_ops.assert_positive(array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense())), message='Matrix was not positive definite.')\n raise NotImplementedError('assert_positive_definite is not implemented.')", + "docstring": "Default implementation of _assert_positive_definite.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:_assert_positive_definite arg:self arguments arg Call If Return return:yes Call Call Call Call Raise Call" + }, + { + "library": "sphinx", + "name": "_md5", + "source_code": "def _md5(data: bytes=b'', **_kw: Any) -> hashlib._Hash:\n import hashlib\n return hashlib.md5(data, usedforsecurity=False)", + "docstring": "Deprecated wrapper around hashlib.md5 To be removed in Sphinx 9.0", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\__init__.py", + "ast_data": "FunctionDef name:_md5 arg:data arguments arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "trim", + "source_code": "def trim(self, correspondence_preserve: bool=False, inplace: bool=False) -> Boxes:\n raise NotImplementedError", + "docstring": "Trim out zero padded boxes. Given box arrangements of shape :math:: == === == === == === == === == -- Box -- Box -- Box -- Box -- -- 0 -- 0 -- Box -- Box -- -- 0 -- Box -- 0 -- 0 -- -- 0 -- 0 -- 0 -- 0 -- == === == === == === == === == Nothing will change if correspondence_preserve is True. 
Only pure zero layers will be removed, resulting in shape :math:: == === == === == === == === == -- Box -- Box -- Box -- Box -- -- 0 -- 0 -- Box -- Box -- -- 0 -- Box -- 0 -- 0 -- == === == === == === == === == Otherwise, you will get :math:: == === == === == === == === == -- Box -- Box -- Box -- Box -- -- 0 -- Box -- Box -- Box -- == === == === == === == === ==", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\boxes.py", + "ast_data": "FunctionDef name:trim arg:self arg:correspondence_preserve arg:inplace arguments arg arg arg Raise" + }, + { + "library": "pytorch", + "name": "enable_cuda_sanitizer", + "source_code": "def enable_cuda_sanitizer():\n cuda_sanitizer.enable()", + "docstring": "Enable CUDA Sanitizer. The sanitizer will begin to analyze low-level CUDA calls invoked by torch functions for synchronization errors. All data races found will be printed to the standard error output along with stack traces of suspected causes. For best results, the sanitizer should be enabled at the very beginning of the program.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\_sanitizer.py", + "ast_data": "FunctionDef name:enable_cuda_sanitizer arguments Call" + }, + { + "library": "numpy", + "name": "test", + "source_code": "@click.option('-m', 'markexpr', metavar='MARKEXPR', default=default, help='Run tests with the given markers')\n@spin.util.extend_command(spin.cmds.meson.test)\ndef test(*, parent_callback, pytest_args, tests, markexpr, **kwargs):\n if not pytest_args and (not tests):\n pytest_args = ('--pyargs', 'numpy')\n if '-m' not in pytest_args:\n if markexpr != 'full':\n pytest_args = ('-m', markexpr) + pytest_args\n kwargs['pytest_args'] = pytest_args\n parent_callback(**{'pytest_args': pytest_args, 'tests': tests, **kwargs})", + "docstring": "By default, spin will run . To run the full test suite, use", + "type": "function", + "file_path": "numpy\\.spin\\cmds.py", + "ast_data": "FunctionDef name:test arguments arg arg arg arg arg If BoolOp Assign If Compare If Compare Assign Assign Call Call Call" + }, + { + "library": "django", + "name": "local", + "source_code": "@property\ndef local(self):\n return bool(capi.islocal(self.ptr))", + "docstring": "Return True if this SpatialReference is local (root node is LOCAL_CS).", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py", + "ast_data": "FunctionDef name:local arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, primals, tangents):\n self._accumulator = pywrap_tfe.TFE_Py_ForwardAccumulatorNew(False)\n self._recording = False\n primal_ids = set()\n for primal in nest.flatten(primals):\n if id(primal) in primal_ids:\n raise ValueError('Tensor {} was specified as a primal multiple times. This may indicate an error. If it was intended, please sum the corresponding tangents.')\n primal_ids.add(id(primal))\n self._watch(primals, tangents)", + "docstring": "Specify tensors to watch and their Jacobian-vector products. Mathematically, is a vector right-multiplying the Jacobian matrix (a Jacobian-vector product) for the function computed while this accumulator is active. Since JVPs are computed in forward mode as the computation happens, this vector must be supplied in advance. Listing a single tensor multiple times in raises an exception. Excluding a tensor from is equivalent to watching it with a tangent tensor of zeros. Args: primals: A tensor or nested structure of tensors to watch. 
tangents: A tensor or nested structure of tensors, with the same nesting structure as , with each element being a vector with the same size as the corresponding primal element. Raises: ValueError: If the same tensor or variable is specified multiple times in .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\forwardprop.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:primals arg:tangents arguments arg arg arg Assign Call Assign Assign Call For Call If Compare Call Raise Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_standardize_and_copy_config", + "source_code": "def _standardize_and_copy_config(config):\n kwargs = config.copy()\n for k, v in kwargs.items():\n if isinstance(v, list):\n kwargs[k] = tuple(v)\n return kwargs", + "docstring": "Returns a shallow copy of config with lists turned to tuples. Keras serialization uses nest to listify everything. This causes problems with the NumericColumn shape, which becomes unhashable. We could try to solve this on the Keras side, but that would require lots of tracking to avoid changing existing behavior. Instead, we ensure here that we revive correctly. Args: config: dict that will be used to revive a Feature Column Returns: Shallow copy of config with lists turned to tuples.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:_standardize_and_copy_config arg:config arguments arg Assign Call For Call If Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "get_reverse_path_info", + "source_code": "def get_reverse_path_info(self, filtered_relation=None):\n opts = self.model._meta\n from_opts = self.remote_field.model._meta\n return [PathInfo(from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation)]", + "docstring": "Get path from the related model to this field's model.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\related.py", + "ast_data": "FunctionDef name:get_reverse_path_info arg:self arg:filtered_relation arguments arg arg Assign Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "Dirs", + "source_code": "@dataclass\nclass Dirs:\n root: Path\n build: Path\n installed: Path\n site: Path\n\n def __init__(self, args=None):\n self.root = Path(__file__).parent.absolute()\n if not args:\n return\n self.build = Path(args.build_dir).resolve()\n if args.install_prefix:\n self.installed = Path(args.install_prefix).resolve()\n else:\n self.installed = self.build.parent / (self.build.stem + '-install')\n self.site = self.get_site_packages()\n\n def add_sys_path(self):\n site_dir = str(self.site)\n sys.path.insert(0, site_dir)\n os.environ['PYTHONPATH'] = os.pathsep.join((site_dir, os.environ.get('PYTHONPATH', '')))\n\n def get_site_packages(self):\n if sys.version_info >= (3, 12):\n plat_path = Path(sysconfig.get_path('platlib'))\n elif 'deb_system' in sysconfig.get_scheme_names():\n plat_path = Path(sysconfig.get_path('platlib', 'deb_system'))\n else:\n plat_path = Path(sysconfig.get_path('platlib'))\n return self.installed / plat_path.relative_to(sys.exec_prefix)", + "docstring": "root: Directory where scr, build config and tools are located (and this file) build: Directory where build output files (i.e. *.o) are saved install: Directory where .so from build and .py from src are put together. 
site: Directory where the built SciPy version was installed. This is a custom prefix, followed by a relative path matching the one the system would use for the site-packages of the active Python interpreter.", + "type": "class", + "file_path": "scipy\\dev.py", + "ast_data": "ClassDef name:Dirs FunctionDef name:__init__ arg:self arg:args arguments arg arg Assign Call Call If Return return:no Assign Call Call If Assign Call Call Assign Assign Call FunctionDef name:add_sys_path arg:self arguments arg Assign Call Call Assign Call Call FunctionDef name:get_site_packages arg:self arguments arg If Compare Assign Call Call If Compare Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "write_file", + "source_code": "def write_file(self, name: str, file_path: str) -> None:\n assert os.path.isfile(file_path), f'{file_path} is not a valid file path'\n with open(file_path, 'rb') as f:\n file_bytes = f.read()\n self.write_bytes(name, file_bytes)", + "docstring": "Copy a file into the archive. name: The destination file inside the archive. file_path: The source file on disk.", + "type": "method", + "file_path": "pytorch\\torch\\export\\pt2_archive\\_package.py", + "ast_data": "FunctionDef name:write_file arg:self arg:name arg:file_path arguments arg arg arg Call With Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "export_meta_graph", + "source_code": "def export_meta_graph(obj, filename: str, signatures=None, options: save_options.SaveOptions=None):\n options = options or save_options.SaveOptions()\n export_dir = os.path.dirname(filename)\n meta_graph_def, exported_graph, _, _, _, _ = _build_meta_graph(obj, signatures, options)\n file_io.atomic_write_string_to_file(filename, meta_graph_def.SerializeToString(deterministic=True))\n if options.save_debug_info:\n _export_debug_info(exported_graph, export_dir)\n ops.dismantle_graph(exported_graph)", + "docstring": "Exports the MetaGraph proto of the to a file. This function goes through the same procedures saved_model.save goes to produce the given object's MetaGraph, then saves it to the given file. It skips saving checkpoint information, and is useful when all one wants is the graph defining the model. Args: obj: A trackable object to build the MetaGraph from. filename: The file into which to write the MetaGraph. signatures: Optional, either a with an input signature specified or the result of on a -decorated function , in which case will be used to generate a signature for the SavedModel under the default serving signature key. may also be a dictionary, in which case it maps from signature keys to either instances with input signatures or concrete functions. The keys of such a dictionary may be arbitrary strings, but will typically be from the module. options: Optional, object that specifies options for saving.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py", + "ast_data": "FunctionDef name:export_meta_graph arg:obj arg:filename arg:signatures arg:options arguments arg arg arg arg Assign BoolOp Call Assign Call Assign Call Call Call If Call Call" + }, + { + "library": "pygame", + "name": "remove_internal", + "source_code": "def remove_internal(self, sprite):\n lost_rect = self.spritedict[sprite]\n if lost_rect:\n self.lostsprites.append(lost_rect)\n del self.spritedict[sprite]", + "docstring": "For removing a sprite from this group internally. 
:param sprite: The sprite we are removing.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:remove_internal arg:self arg:sprite arguments arg arg Assign If Call" + }, + { + "library": "django", + "name": "print_help", + "source_code": "def print_help(self, prog_name, subcommand):\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()", + "docstring": "Print the help message for this command, derived from ``.", + "type": "method", + "file_path": "django\\django\\core\\management\\base.py", + "ast_data": "FunctionDef name:print_help arg:self arg:prog_name arg:subcommand arguments arg arg arg Assign Call Call" + }, + { + "library": "matplotlib", + "name": "get_fontsize", + "source_code": "def get_fontsize(self):\n return self._fontproperties.get_size_in_points()", + "docstring": "Return the font size as an integer. See Also -------- .font_manager.FontProperties.get_size_in_points", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:get_fontsize arg:self arguments arg Return return:yes Call" + }, + { + "library": "seaborn", + "name": "standard_scale", + "source_code": "@staticmethod\ndef standard_scale(data2d, axis=1):\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n subtract = standardized.min()\n standardized = (standardized - subtract) / (standardized.max() - standardized.min())\n if axis == 1:\n return standardized\n else:\n return standardized.T", + "docstring": "Divide the data by the difference between the max and min Parameters ---------- data2d : pandas.DataFrame Data to normalize axis : int Which axis to normalize across. If 0, normalize across rows, if 1, normalize across columns. Returns ------- standardized : pandas.DataFrame Noramlized data with a mean of 0 and variance of 1 across the specified axis.", + "type": "method", + "file_path": "seaborn\\seaborn\\matrix.py", + "ast_data": "FunctionDef name:standard_scale arg:data2d arg:axis arguments arg arg If Compare Assign Assign Assign Call Assign Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "lambda_auto_wrap_policy", + "source_code": "def lambda_auto_wrap_policy(module: nn.Module, recurse: bool, nonwrapped_numel: int, lambda_fn: Callable) -> bool:\n if recurse:\n return True\n return lambda_fn(module)", + "docstring": "A convenient auto wrap policy to wrap submodules based on an arbitrary user function. 
If wrapper_cls_recursive_wrap`, then this module will be wrapped.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py", + "ast_data": "FunctionDef name:lambda_auto_wrap_policy arg:module arg:recurse arg:nonwrapped_numel arg:lambda_fn arguments arg arg arg arg If Return return:yes Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_adjustable", + "source_code": "def set_adjustable(self, adjustable, share=False):\n _api.check_in_list(['box', 'datalim'], adjustable=adjustable)\n if share:\n axs = {sibling for name in self._axis_names for sibling in self._shared_axes[name].get_siblings(self)}\n else:\n axs = [self]\n if adjustable == 'datalim' and any((getattr(ax.get_data_ratio, '__func__', None) != _AxesBase.get_data_ratio for ax in axs)):\n raise ValueError(\"Cannot set Axes adjustable to 'datalim' for Axes which override 'get_data_ratio'\")\n for ax in axs:\n ax._adjustable = adjustable\n self.stale = True", + "docstring": "Set how the Axes adjusts to achieve the required aspect ratio. Parameters ---------- adjustable : {'box', 'datalim'} If 'box', change the physical dimensions of the Axes. If 'datalim', change the ``, apply the settings to all shared Axes. See Also -------- matplotlib.axes.Axes.set_aspect For a description of aspect handling. Notes ----- Shared Axes (of which twinned Axes are a special case) impose restrictions on how aspect ratios can be imposed. For twinned Axes, use 'datalim'. For Axes that share both x and y, use 'box'. Otherwise, either 'datalim' or 'box' may be used. These limitations are partly a requirement to avoid over-specification, and partly a result of the particular implementation we are currently using, in which the adjustments for aspect ratios are done sequentially and independently on each Axes as it is drawn.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:set_adjustable arg:self arg:adjustable arg:share arguments arg arg arg Call If Assign Call Assign If BoolOp Compare Call Compare Call Raise Call For Assign Assign" + }, + { + "library": "tensorflow", + "name": "dropout_v2", + "source_code": "@dispatch.dispatch_for_api(nn_ops.dropout_v2)\ndef dropout_v2(x: ragged_tensor.Ragged, rate, noise_shape=None, seed=None, name=None):\n if noise_shape is not None:\n raise ValueError('noise_shape is not supported yet for RaggedTensor x')\n with ops.name_scope(name, 'RaggedNNDropout', [x, rate]):\n x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')\n return x.with_flat_values(nn_ops.dropout_v2(x.flat_values, rate=rate, seed=seed))", + "docstring": "Ragged dispatch target for tf.nn.dropout.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py", + "ast_data": "FunctionDef name:dropout_v2 arg:x arg:rate arg:noise_shape arg:seed arg:name arguments arg arg arg arg arg If Compare Raise Call With Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "Qing", + "source_code": "class Qing(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-500.0] * self.N, [500.0] * self.N))\n self.custom_bounds = [(-2, 2), (-2, 2)]\n self.global_optimum = [[sqrt(_) for _ in range(1, self.N + 1)]]\n self.fglob = 0\n\n def fun(self, x, *args):\n self.nfev += 1\n i = arange(1, self.N + 1)\n return sum((x ** 2.0 - i) ** 2.0)", + "docstring": "Qing objective function. 
This class defines the Qing [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Qing}}(x) = \\sum_{i=1}^{n} (x_i^2 - i)^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_Q.py", + "ast_data": "ClassDef name:Qing Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "read_file", + "source_code": "def read_file(filename: Optional[str]=None) -> bool:\n if filename is None:\n filename = get_filename()\n return torch._C._cuda_tunableop_read_file(filename)", + "docstring": "Read results from a TunableOp CSV file. If :attr: is not given, `` is called.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\tunable.py", + "ast_data": "FunctionDef name:read_file arg:filename arguments arg If Compare Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_add_attribute", + "source_code": "def _add_attribute(node: _C.Node, key: str, value: Any, aten: bool):\n m = _ATTR_PATTERN.match(key)\n if m is None:\n raise ValueError(f\"Invalid attribute specifier '{key}' names must be suffixed with type, e.g. 'dim_i' or 'dims_i'\")\n name, kind = (m.group(1), m.group(2))\n if _is_onnx_list(value):\n kind += 's'\n return getattr(node, f'{kind}_')(name, value)", + "docstring": "Initializes the right attribute based on type of value.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py", + "ast_data": "FunctionDef name:_add_attribute arg:node arg:key arg:value arg:aten arguments arg arg arg arg Assign Call If Compare Raise Call Assign Call Call If Call Return return:yes Call Call" + }, + { + "library": "scrapy", + "name": "_request_deferred", + "source_code": "def _request_deferred(request: Request) -> defer.Deferred[Any]:\n request_callback = request.callback\n request_errback = request.errback\n\n def _restore_callbacks(result: Any) -> Any:\n request.callback = request_callback\n request.errback = request_errback\n return result\n d: defer.Deferred[Any] = defer.Deferred()\n d.addBoth(_restore_callbacks)\n if request.callback:\n d.addCallback(request.callback)\n if request.errback:\n d.addErrback(request.errback)\n request.callback, request.errback = (d.callback, d.errback)\n return d", + "docstring": "Wrap a request inside a Deferred. This function is harmful, do not use it until you know what you are doing. This returns a Deferred whose first pair of callbacks are the request callback and errback. The Deferred also triggers when the request callback/errback is executed (i.e. 
when the request is downloaded) WARNING: Do not call request.replace() until after the deferred is called.", + "type": "function", + "file_path": "scrapy\\scrapy\\shell.py", + "ast_data": "FunctionDef name:_request_deferred arg:request arguments arg Assign Assign FunctionDef name:_restore_callbacks arg:result arguments arg Assign Assign Return return:yes Call Call If Call If Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "load_observer_state_dict", + "source_code": "def load_observer_state_dict(mod, obs_dict):\n missing_keys: list[str] = []\n unexpected_keys: list[str] = []\n for name, module in mod.named_modules():\n prefix = name + '.'\n if _is_activation_post_process(module):\n if _is_per_channel_script_obs_instance(module):\n module._load_from_state_dict_script(obs_dict, prefix, {}, True, missing_keys, unexpected_keys, [])\n else:\n module._load_from_state_dict(obs_dict, prefix, {}, False, missing_keys, unexpected_keys, [])\n for k in missing_keys:\n if 'observer' in k or 'activation_post_process' in k:\n raise Exception(f'Missing keys for observer {k} in state_dict')\n for k in unexpected_keys:\n if 'observer' in k or 'activation_post_process' in k:\n raise Exception(f'Unexpected keys for observer {k} in state_dict')", + "docstring": "Given input model and a state_dict containing model observer stats, load the stats back into the model. The observer state_dict can be saved using torch.ao.quantization.get_observer_state_dict", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\observer.py", + "ast_data": "FunctionDef name:load_observer_state_dict arg:mod arg:obs_dict arguments arg arg For Call Assign If Call If Call Call Call For If BoolOp Compare Compare Raise Call For If BoolOp Compare Compare Raise Call" + }, + { + "library": "pytorch", + "name": "record_event", + "source_code": "def record_event(self, event=None):\n if event is None:\n event = Event()\n event.record(self)\n return event", + "docstring": "Record an event. Args: event (torch.xpu.Event, optional): event to record. If not given, a new one will be allocated. 
Returns: Recorded event.", + "type": "method", + "file_path": "pytorch\\torch\\xpu\\streams.py", + "ast_data": "FunctionDef name:record_event arg:self arg:event arguments arg arg If Compare Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "weight_is_statically_quantized", + "source_code": "def weight_is_statically_quantized(qconfig):\n return weight_dtype(qconfig) in [torch.quint8, torch.qint8, torch.uint8, torch.int8]", + "docstring": "Given a qconfig, decide if the weight needs to be statically quantized or not", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\utils.py", + "ast_data": "FunctionDef name:weight_is_statically_quantized arg:qconfig arguments arg Return return:yes Compare Call" + }, + { + "library": "django", + "name": "get_relations", + "source_code": "def get_relations(self, cursor, table_name):\n cursor.execute(\"\\n SELECT a1.attname, c2.relname, a2.attname\\n FROM pg_constraint con\\n LEFT JOIN pg_class c1 ON con.conrelid = c1.oid\\n LEFT JOIN pg_class c2 ON con.confrelid = c2.oid\\n LEFT JOIN\\n pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]\\n LEFT JOIN\\n pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]\\n WHERE\\n c1.relname = %s AND\\n con.contype = 'f' AND\\n c1.relnamespace = c2.relnamespace AND\\n pg_catalog.pg_table_is_visible(c1.oid)\\n \", [table_name])\n return {row[0]: (row[2], row[1]) for row in cursor.fetchall()}", + "docstring": "Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all foreign keys in the given table.", + "type": "method", + "file_path": "django\\django\\db\\backends\\postgresql\\introspection.py", + "ast_data": "FunctionDef name:get_relations arg:self arg:cursor arg:table_name arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "StretchedV", + "source_code": "class StretchedV(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10] * self.N, [10] * self.N))\n self.global_optimum = [[0, 0]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n t = x[1:] ** 2 + x[:-1] ** 2\n return sum(t ** 0.25 * sin(50.0 * t ** 0.1 + 1) ** 2)", + "docstring": "StretchedV objective function. This class defines the Stretched V [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{StretchedV}}(x) = \\sum_{i=1}^{n-1} t^{1/4} [\\sin (50t^{0.1}) + 1]^2 Where, in this exercise: .. math:: t = x_{i+1}^2 + x_i^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: when :math:. .. [1] Adorio, E. MVF - \"Multivariate Test Functions Library in C for Unconstrained Global Optimization\", 2005 TODO All the sources disagree on the equation, in some the 1 is in the brackets, in others it is outside. In Jamil#142 it's not even 1. 
Here we go with the Adorio option.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", + "ast_data": "ClassDef name:StretchedV Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call" + }, + { + "library": "seaborn", + "name": "reordered_ind", + "source_code": "@property\ndef reordered_ind(self):\n return self.dendrogram['leaves']", + "docstring": "Indices of the matrix, reordered by the dendrogram", + "type": "method", + "file_path": "seaborn\\seaborn\\matrix.py", + "ast_data": "FunctionDef name:reordered_ind arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "read_sparse_array", + "source_code": "def read_sparse_array(self, hdr):\n res = self.read_sub_array(hdr)\n tmp = res[:-1, :]\n dims = (int(res[-1, 0]), int(res[-1, 1]))\n I = np.ascontiguousarray(tmp[:, 0], dtype='intc')\n J = np.ascontiguousarray(tmp[:, 1], dtype='intc')\n I -= 1\n J -= 1\n if res.shape[1] == 3:\n V = np.ascontiguousarray(tmp[:, 2], dtype='float')\n else:\n V = np.ascontiguousarray(tmp[:, 2], dtype='complex')\n V.imag = tmp[:, 3]\n return scipy.sparse.coo_array((V, (I, J)), dims)", + "docstring": "Read and return sparse matrix type Parameters ---------- hdr : `` field set to True; the fact that the data are complex is only detectable because there are 4 storage columns.", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py", + "ast_data": "FunctionDef name:read_sparse_array arg:self arg:hdr arguments arg arg Assign Call Assign Assign Call Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Return return:yes Call" + }, + { + "library": "django", + "name": "generate_removed_fields", + "source_code": "def generate_removed_fields(self):\n for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):\n self._generate_removed_field(app_label, model_name, field_name)", + "docstring": "Make RemoveField operations.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\autodetector.py", + "ast_data": "FunctionDef name:generate_removed_fields arg:self arguments arg For Call Call" + }, + { + "library": "tensorflow", + "name": "CancellationManager", + "source_code": "class CancellationManager(object):\n __slots__ = ['_impl']\n\n def __init__(self):\n self._impl = pywrap_tfe.TFE_NewCancellationManager()\n\n @property\n def is_cancelled(self):\n return pywrap_tfe.TFE_CancellationManagerIsCancelled(self._impl)\n\n def start_cancel(self):\n pywrap_tfe.TFE_CancellationManagerStartCancel(self._impl)\n\n def get_cancelable_function(self, concrete_function):\n\n def cancellable(*args, **kwargs):\n with CancellationManagerContext(self):\n return concrete_function(*args, **kwargs)\n return cancellable", + "docstring": "A mechanism for cancelling blocking computation.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\eager\\cancellation.py", + "ast_data": "ClassDef name:CancellationManager Assign FunctionDef name:__init__ arg:self arguments arg Assign Call FunctionDef name:is_cancelled arg:self arguments arg Return return:yes Call FunctionDef name:start_cancel arg:self arguments arg Call FunctionDef name:get_cancelable_function arg:self arg:concrete_function arguments arg arg FunctionDef name:cancellable arguments arg arg With Call Return return:yes Call Return return:yes" + }, + { + "library": 
"tensorflow", + "name": "cosine_distance", + "source_code": "@tf_export(v1=['losses.cosine_distance'])\n@dispatch.add_dispatch_support\n@deprecated_args(None, 'dim is deprecated, use axis instead', 'dim')\ndef cosine_distance(labels, predictions, axis=None, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS, dim=None):\n axis = deprecated_argument_lookup('axis', axis, 'dim', dim)\n if axis is None:\n raise ValueError('You must specify argument `axis`.')\n if labels is None:\n raise ValueError('Argument `labels` must not be None.')\n if predictions is None:\n raise ValueError('Argument `predictions` must not be None.')\n with ops.name_scope(scope, 'cosine_distance_loss', (predictions, labels, weights)) as scope:\n predictions = math_ops.cast(predictions, dtype=dtypes.float32)\n labels = math_ops.cast(labels, dtype=dtypes.float32)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n radial_diffs = math_ops.multiply(predictions, labels)\n losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(axis,), keepdims=True)\n return compute_weighted_loss(losses, weights, scope, loss_collection, reduction=reduction)", + "docstring": "Adds a cosine-distance loss to the training procedure. Note that the function assumes that and are already unit-normalized. Args: labels: whose shape matches 'predictions' predictions: An arbitrary matrix. axis: The dimension along which the cosine distance is computed. weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which this loss will be added. reduction: Type of reduction to apply to loss. dim: The old (deprecated) name for . Returns: Weighted loss float . If is , this has the same shape as ; otherwise, it is scalar. Raises: ValueError: If shape doesn't match shape, or , , or is . @compatibility(eager) The argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a . @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\losses_impl.py", + "ast_data": "FunctionDef name:cosine_distance arg:labels arg:predictions arg:axis arg:weights arg:scope arg:loss_collection arg:reduction arg:dim arguments arg arg arg arg arg arg arg arg Assign Call If Compare Raise Call If Compare Raise Call If Compare Raise Call With Call Assign Call Assign Call Call Call Call Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "get_cpu_capability", + "source_code": "def get_cpu_capability() -> str:\n return torch._C._get_cpu_capability()", + "docstring": "Return cpu capability as a string value. 
Possible values: - \"DEFAULT\" - \"VSX\" - \"Z VECTOR\" - \"NO AVX\" - \"AVX2\" - \"AVX512\" - \"SVE256\"", + "type": "function", + "file_path": "pytorch\\torch\\backends\\cpu\\__init__.py", + "ast_data": "FunctionDef name:get_cpu_capability arguments Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "set", + "source_code": "def set(self, y: Array | complex, /, copy: bool | None=None, xp: ModuleType | None=None) -> Array:\n return self._op(_AtOp.SET, None, None, y, copy=copy, xp=xp)", + "docstring": "Apply `` and return the update array.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py", + "ast_data": "FunctionDef name:set arg:copy arg:xp arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_per_worker_dataset", + "source_code": "def get_per_worker_dataset(dataset_or_dataset_fn, coordinator):\n if callable(dataset_or_dataset_fn):\n return PerWorkerDatasetFromDatasetFunction(dataset_or_dataset_fn, coordinator)\n else:\n return PerWorkerDatasetFromDataset(dataset_or_dataset_fn, coordinator)", + "docstring": "Returns a per-worker dataset from a dataset or a dataset function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py", + "ast_data": "FunctionDef name:get_per_worker_dataset arg:dataset_or_dataset_fn arg:coordinator arguments arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_default", + "source_code": "@staticmethod\n@functools.lru_cache\ndef get_default():\n from django.template import engines\n from django.template.backends.django import DjangoTemplates\n for engine in engines.all():\n if isinstance(engine, DjangoTemplates):\n return engine.engine\n raise ImproperlyConfigured('No DjangoTemplates backend is configured.')", + "docstring": "Return the first DjangoTemplates backend that's configured, or raise ImproperlyConfigured if none are configured. This is required for preserving historical APIs that rely on a globally available, implicitly configured engine such as: >>> from django.template import Context, Template >>> template = Template(\"Hello {{ name }}!\") >>> context = Context({'name': \"world\"}) >>> template.render(context) 'Hello world!'", + "type": "method", + "file_path": "django\\django\\template\\engine.py", + "ast_data": "FunctionDef name:get_default arguments For Call If Call Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "ctc_beam_search_decoder_v2", + "source_code": "@tf_export('nn.ctc_beam_search_decoder', v1=['nn.ctc_beam_search_decoder_v2'])\n@dispatch.add_dispatch_support\ndef ctc_beam_search_decoder_v2(inputs, sequence_length, beam_width=100, top_paths=1):\n return ctc_beam_search_decoder(inputs, sequence_length=sequence_length, beam_width=beam_width, top_paths=top_paths, merge_repeated=False)", + "docstring": "Performs beam search decoding on the logits given in input. **Note** Although in general greedy search is a special case of beam-search with and , differs from in the treatment of blanks when computing the probability of a sequence: - treats blanks as sequence termination - treats blanks as regular elements Args: inputs: 3-D , size . The logits. sequence_length: 1-D vector containing sequence lengths, having size . beam_width: An int scalar >= 0 (beam search beam width). top_paths: An int scalar >= 0, <= beam_width (controls output size). 
Returns: A tuple where decoded: A list of length top_paths, where is a containing the decoded outputs: : Indices matrix ; The rows store: . : Values vector, size . The vector stores the decoded classes for beam . : Shape vector, size . The shape values are: . log_probability: A matrix containing sequence log-probabilities.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ctc_ops.py", + "ast_data": "FunctionDef name:ctc_beam_search_decoder_v2 arg:inputs arg:sequence_length arg:beam_width arg:top_paths arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "sort", + "source_code": "def sort(self, key=None, reverse=False):\n self[:] = sorted(self, key=key, reverse=reverse)", + "docstring": "Standard list sort method", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py", + "ast_data": "FunctionDef name:sort arg:self arg:key arg:reverse arguments arg arg arg Assign Call" + }, + { + "library": "tensorflow", + "name": "to_proto", + "source_code": "def to_proto(self):\n return saved_object_graph_pb2.SavedUserObject(identifier=self.identifier, version=versions_pb2.VersionDef(producer=self.version, min_consumer=self._min_consumer_version, bad_consumers=self._bad_consumers))", + "docstring": "Create a SavedUserObject proto.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\revived_types.py", + "ast_data": "FunctionDef name:to_proto arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "get_fill", + "source_code": "def get_fill(self):\n return not cbook._str_lower_equal(self._original_facecolor, 'none')", + "docstring": "Return whether face is colored.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:get_fill arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "is_view", + "source_code": "def is_view(op: torch._ops.OpOverload) -> bool:\n return any((a.alias_info is not None for a in op._schema.arguments))", + "docstring": "Does this op overload have aliasing", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\utils.py", + "ast_data": "FunctionDef name:is_view arg:op arguments arg Return return:yes Call Compare" + }, + { + "library": "matplotlib", + "name": "number_of_parameters", + "source_code": "@staticmethod\n@cache\ndef number_of_parameters(func):\n return len(inspect.signature(func).parameters)", + "docstring": "Return number of parameters of the callable *func*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:number_of_parameters arg:func arguments arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "_scale_channel", + "source_code": "def _scale_channel(im: Tensor) -> Tensor:\n min_ = im.min()\n max_ = im.max()\n if min_.item() < 0.0 and (not torch.isclose(min_, torch.as_tensor(0.0, dtype=min_.dtype))):\n raise ValueError(f'Values in the input tensor must greater or equal to 0.0. Found {min_.item()}.')\n if max_.item() > 1.0 and (not torch.isclose(max_, torch.as_tensor(1.0, dtype=max_.dtype))):\n raise ValueError(f'Values in the input tensor must lower or equal to 1.0. Found {max_.item()}.')\n ndims = len(im.shape)\n if ndims not in (2, 3):\n raise TypeError(f'Input tensor must have 2 or 3 dimensions. 
Found {ndims}.')\n im = im * 255.0\n histo = _torch_histc_cast(im, bins=256, min=0, max=255)\n nonzero_histo = torch.reshape(histo[histo != 0], [-1])\n step = torch.div(torch.sum(nonzero_histo) - nonzero_histo[-1], 255, rounding_mode='trunc')\n if step == 0:\n result = im\n else:\n result = torch.gather(_build_lut(histo, step), 0, im.flatten().long())\n result = result.reshape_as(im)\n return result / 255.0", + "docstring": "Scale the data in the channel to implement equalize. Args: im: image tensor with shapes like :math: or :math:. Returns: image tensor with the batch in the zero position.", + "type": "function", + "file_path": "kornia\\kornia\\enhance\\adjust.py", + "ast_data": "FunctionDef name:_scale_channel arg:im arguments arg Assign Call Assign Call If BoolOp Compare Call Call Call Raise Call Call If BoolOp Compare Call Call Call Raise Call Call Assign Call If Compare Raise Call Assign Assign Call Assign Call Compare Assign Call Call If Compare Assign Assign Call Call Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "env", + "source_code": "@tf_export('__internal__.distribute.combinations.env', v1=[])\ndef env():\n return _env", + "docstring": "Returns the object holds the test environment information. Tests should modify this in the main process if needed, and it will be passed to the worker processes each time a test case is run. Returns: a TestEnvironment object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py", + "ast_data": "FunctionDef name:env arguments Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_merge_node_names", + "source_code": "def _get_merge_node_names(self, device_name):\n if device_name not in self._device_names:\n raise ValueError('Invalid device name: %s' % device_name)\n if not hasattr(self, '_merge_node_names'):\n self._merge_node_names = {}\n if device_name not in self._merge_node_names:\n debug_graph = self._debug_graphs[device_name]\n self._merge_node_names[device_name] = [node for node in debug_graph.node_op_types if debug_graph.node_op_types[node] == 'Merge']\n return self._merge_node_names[device_name]", + "docstring": "Lazily get a list of Merge nodes on a given device.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", + "ast_data": "FunctionDef name:_get_merge_node_names arg:self arg:device_name arguments arg arg If Compare Raise Call If Call Assign If Compare Assign Assign Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "_create_placeholder_helper", + "source_code": "def _create_placeholder_helper(self, graph: Any, tensor: core.Tensor, name: str):\n placeholder = self._by_val_internal.get(id(tensor))\n if placeholder is None:\n tracing_ctx = trace_type.InternalTracingContext()\n spec = trace_type.from_value(tensor, tracing_ctx)\n spec._name = name\n if isinstance(tensor, core.Value) and tensor.is_packed:\n composite_device_name = tensor.device\n else:\n composite_device_name = None\n placeholder_ctx = trace_type.InternalPlaceholderContext(graph, with_none_control_dependencies=True, composite_device_name=composite_device_name)\n placeholder = spec.placeholder_value(placeholder_ctx)\n self.add_or_replace(key=id(tensor), external=tensor, internal=placeholder, is_by_ref=False)\n graph.inputs.append(placeholder)\n placeholder._record_tape(tensor)\n return placeholder", + "docstring": "A helper function to create capture placeholder.", + "type": "method", + 
"file_path": "tensorflow\\tensorflow\\core\\function\\capture\\capture_container.py", + "ast_data": "FunctionDef name:_create_placeholder_helper arg:self arg:graph arg:tensor arg:name arguments arg arg arg arg Assign Call Call If Compare Assign Call Assign Call Assign If BoolOp Call Assign Assign Assign Call Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_soft_device_placement", + "source_code": "@tf_export('config.set_soft_device_placement')\ndef set_soft_device_placement(enabled):\n context.context().soft_device_placement = enabled", + "docstring": "Enable or disable soft device placement. If enabled, ops can be placed on different devices than the device explicitly assigned by the user. This potentially has a large performance cost due to an increase in data communication between devices. Some cases where soft_device_placement would modify device assignment are: 1. no GPU/TPU implementation for the OP 2. no GPU devices are known or registered 3. need to co-locate with reftype input(s) which are from CPU 4. an OP can not be compiled by XLA. Common for TPU which always requires the XLA compiler. For TPUs, if this option is true, a feature called automatic outside compilation is enabled. Automatic outside compilation will move uncompilable ops within a TPU program to instead run on the host. This can be used when encountering compilation failures due to unsupported ops. Note: by default soft device placement is enabled when running in eager mode (for convenience) and disabled in graph mode (for performance). Args: enabled: A boolean indicating whether to enable soft placement.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py", + "ast_data": "FunctionDef name:set_soft_device_placement arg:enabled arguments arg Assign Call Call" + }, + { + "library": "tensorflow", + "name": "get_iterator_id_fn", + "source_code": "def get_iterator_id_fn(unused_dummy):\n return script_ops.numpy_function(generator_state.get_next_id, args, dtypes.int64)", + "docstring": "Creates a unique for each pass over the dataset. The returned disambiguates between multiple concurrently existing iterators. Args: unused_dummy: Ignored value. Returns: A tensor whose value uniquely identifies an iterator in .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\from_generator_op.py", + "ast_data": "FunctionDef name:get_iterator_id_fn arg:unused_dummy arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "filter", + "source_code": "def filter(self, *args, **kwargs):\n self._not_support_combined_queries('filter')\n return self._filter_or_exclude(False, args, kwargs)", + "docstring": "Return a new QuerySet instance with the args ANDed to the existing set.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:filter arg:self arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "deprecated_endpoints", + "source_code": "def deprecated_endpoints(*args):\n\n def deprecated_wrapper(func):\n if '_tf_deprecated_api_names' in func.__dict__:\n raise DeprecatedNamesAlreadySetError(f'Cannot set deprecated names for {func.__name__} to {args}. Deprecated names are already set to {func._tf_deprecated_api_names}.')\n func._tf_deprecated_api_names = args\n return func\n return deprecated_wrapper", + "docstring": "Decorator for marking endpoints deprecated. 
This decorator does not print deprecation messages. TODO(annarev): eventually start printing deprecation warnings when @deprecation_endpoints decorator is added. Args: *args: Deprecated endpoint names. Returns: A function that takes symbol as an argument and adds _tf_deprecated_api_names to that symbol. _tf_deprecated_api_names would be set to a list of deprecated endpoint names for the symbol.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py", + "ast_data": "FunctionDef name:deprecated_endpoints arguments arg FunctionDef name:deprecated_wrapper arg:func arguments arg If Compare Raise Call Assign Return return:yes Return return:yes" + }, + { + "library": "cherrypy", + "name": "footer", + "source_code": "def footer(self):\n return ''", + "docstring": "Render HTML layout footer.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut08_generators_and_yield.py", + "ast_data": "FunctionDef name:footer arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_instruction_by_offset", + "source_code": "def _get_instruction_by_offset(offset_to_inst: dict[int, Instruction], offset: int):\n for n in (0, 2, 4, 6):\n if offset_to_inst[offset + n].opcode != dis.EXTENDED_ARG:\n return offset_to_inst[offset + n]\n return None", + "docstring": "Get the instruction located at a given offset, accounting for EXTENDED_ARGs", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "FunctionDef name:_get_instruction_by_offset arg:offset_to_inst arg:offset arguments arg arg For If Compare Return return:yes Return return:no" + }, + { + "library": "pandas", + "name": "validate_argsort_with_ascending", + "source_code": "def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs) -> bool:\n if is_integer(ascending) or ascending is None:\n args = (ascending,) + args\n ascending = True\n validate_argsort_kind(args, kwargs, max_fname_arg_count=3)\n ascending = cast(bool, ascending)\n return ascending", + "docstring": "If 'Categorical.argsort' is called via the 'numpy' library, the first parameter in its signature is 'axis', which takes either an integer or 'None', so check if the 'ascending' parameter has either integer type or is None, since 'ascending' itself should be a boolean", + "type": "function", + "file_path": "pandas\\pandas\\compat\\numpy\\function.py", + "ast_data": "FunctionDef name:validate_argsort_with_ascending arg:ascending arg:args arg:kwargs arguments arg arg arg If BoolOp Call Compare Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "max_pool2d_with_indices", + "source_code": "def max_pool2d_with_indices(input: Tensor, kernel_size: BroadcastingList2[int], stride: Optional[BroadcastingList2[int]]=None, padding: BroadcastingList2[int]=0, dilation: BroadcastingList2[int]=1, ceil_mode: bool=False, return_indices: bool=False) -> tuple[Tensor, Tensor]:\n if has_torch_function_unary(input):\n return handle_torch_function(max_pool2d_with_indices, (input,), input, kernel_size, stride=stride, padding=padding, dilation=dilation, ceil_mode=ceil_mode, return_indices=return_indices)\n if stride is None:\n stride = torch.jit.annotate(list[int], [])\n return torch._C._nn.max_pool2d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode)", + "docstring": "max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False) Applies a 2D max pooling over an input 
signal composed of several input planes. .. note:: The order of :attr: and :attr: is different from what seen in :class:, and will change in a future release. See :class: for details. Args: input: input tensor :math:, minibatch dim optional. kernel_size: size of the pooling region. Can be a single number or a tuple stride: stride of the pooling operation. Can be a single number or a tuple . Default: :attr: padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and 0. ceil_mode: If `ceilfloortorch.nn.functional.max_unpool2d` later", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:max_pool2d_with_indices arg:input arg:kernel_size arg:stride arg:padding arg:dilation arg:ceil_mode arg:return_indices arguments arg arg arg arg arg arg arg If Call Return return:yes Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "write_version_info", + "source_code": "def write_version_info(filename, git_version):\n if b'\"' in git_version or b'\\\\' in git_version:\n git_version = b'git_version_is_invalid'\n contents = '\\n/* Generated by gen_git_source.py */\\n\\n#ifndef TENSORFLOW_CORE_UTIL_VERSION_INFO_H_\\n#define TENSORFLOW_CORE_UTIL_VERSION_INFO_H_\\n\\n#define STRINGIFY(x) #x\\n#define TOSTRING(x) STRINGIFY(x)\\n\\n#define TF_GIT_VERSION \"%s\"\\n#ifdef _MSC_VER\\n#define TF_COMPILER_VERSION \"MSVC \" TOSTRING(_MSC_FULL_VER)\\n#else\\n#define TF_COMPILER_VERSION __VERSION__\\n#endif\\n#ifdef _GLIBCXX_USE_CXX11_ABI\\n#define TF_CXX11_ABI_FLAG _GLIBCXX_USE_CXX11_ABI\\n#else\\n#define TF_CXX11_ABI_FLAG 0\\n#endif\\n#define TF_CXX_VERSION __cplusplus\\n#ifdef TENSORFLOW_MONOLITHIC_BUILD\\n#define TF_MONOLITHIC_BUILD 1\\n#else\\n#define TF_MONOLITHIC_BUILD 0\\n#endif\\n\\n#endif // TENSORFLOW_CORE_UTIL_VERSION_INFO_H_\\n' % git_version.decode('utf-8')\n open(filename, 'w').write(contents)", + "docstring": "Write a c file that defines the version functions. Args: filename: filename to write to. 
git_version: the result of a git describe.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\git\\gen_git_source.py", + "ast_data": "FunctionDef name:write_version_info arg:filename arg:git_version arguments arg arg If BoolOp Compare Compare Assign Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "serialized", + "source_code": "def serialized(self):\n if self._serialized is None:\n proto = topology_pb2.TopologyProto()\n proto.mesh_shape[:] = list(self._mesh_shape)\n proto.num_tasks = self._device_coordinates.shape[0]\n proto.num_tpu_devices_per_task = self._device_coordinates.shape[1]\n proto.device_coordinates.extend(list(self._device_coordinates.flatten()))\n self._serialized = proto.SerializeToString()\n return self._serialized", + "docstring": "Returns the serialized form of the topology.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py", + "ast_data": "FunctionDef name:serialized arg:self arguments arg If Compare Assign Call Assign Call Assign Assign Call Call Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "requantize_bias_helper", + "source_code": "def requantize_bias_helper(g: jit_utils.GraphContext, bias, input_scale, weight_scale, axis=None):\n bias_scale = g.op('Mul', weight_scale, input_scale)\n bias_scale_shape = g.op('Shape', bias_scale)\n bias_zero_point = g.op('ConstantOfShape', bias_scale_shape, value_t=torch.tensor([0], dtype=torch.int))\n q_bias = g.op('Cast', g.op('Div', bias, bias_scale), to_i=_C_onnx.TensorProtoDataType.INT32)\n axis_args = []\n if axis is not None and (not _is_none(axis)):\n axis_args.append(axis)\n return g.op('prim::TupleConstruct', q_bias, bias_scale, bias_zero_point, *axis_args)", + "docstring": "In PyTorch, bias is float and is quantized to int32 implicitly inside the quantized ATen op kernel. In ONNX we need to make the quantization explicit because operators expect all of their inputs to be quantized. Since int32 is not a supported output type by ONNX operator , quantization is exported using regular operators.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py", + "ast_data": "FunctionDef name:requantize_bias_helper arg:g arg:bias arg:input_scale arg:weight_scale arg:axis arguments arg arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Call Assign If BoolOp Compare Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_flatten_and_filter_composite", + "source_code": "def _flatten_and_filter_composite(maybe_composite, non_composite_output, composite_output=None):\n if isinstance(maybe_composite, composite_tensor.CompositeTensor):\n num_components = len(nest.flatten(maybe_composite, expand_composites=True))\n return (composite_output,) * num_components\n return non_composite_output", + "docstring": "For an input, replaced the input by a tuple if the input is composite. If is not composite, return the parameter otherwise return a tuple which consists of the value of the parameter the same number of times as there are components of the composite tensor. This is useful for computing a mask when flattening nested data with . For example and will have the same length and second will be True if the tensor in the first is derived from a expanding a composite tensor. Args: maybe_composite: A value to test for being a composite tensor. non_composite_output: The value to return when is not a composite. 
composite_output: the value to fill the output tuple with if is a composite. Returns: or a tuple with multiple copies of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py", + "ast_data": "FunctionDef name:_flatten_and_filter_composite arg:maybe_composite arg:non_composite_output arg:composite_output arguments arg arg arg If Call Assign Call Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "back", + "source_code": "def back(self):\n self._pos = max(self._pos - 1, 0)\n return self()", + "docstring": "Move the position back and return the current element.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:back arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "OperatorSupport", + "source_code": "@compatibility(is_backward_compatible=False)\nclass OperatorSupport(OperatorSupportBase):\n _support_dict: SupportDict\n\n def __init__(self, support_dict: t.Optional[SupportDict]=None):\n self._support_dict = support_dict or {}\n\n def is_node_supported(self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node) -> bool:\n if node.op not in CALLABLE_NODE_OPS:\n return True\n target = get_node_target(submodules, node)\n if target not in self._support_dict:\n return False\n if self._support_dict[target] is None:\n return True\n args_dtypes, kwargs_dtypes = self._support_dict[target]\n for i, dtypes in enumerate(args_dtypes):\n if len(node.args) <= i:\n break\n if dtypes is None:\n continue\n if not isinstance(node.args[i], torch.fx.Node):\n continue\n arg_dtype = _get_arg_dtype(node.args[i])\n if arg_dtype not in dtypes:\n return False\n for k, dtypes in kwargs_dtypes.items():\n if k not in node.kwargs:\n continue\n if not isinstance(node.kwargs[k], torch.fx.Node):\n continue\n kwarg_dtype = _get_arg_dtype(node.kwargs[k])\n if kwarg_dtype not in dtypes:\n return False\n return True", + "docstring": "maps node.target typename to supported inputs dtypes. node.target typename is retrieved using helper function If supported inputs dtypes is None, it means any dtype is supported, else we should see a tuple like (([dtypes], ...), {\"name\":[dtypes], ...}). The first tuple ([dtypes], ...) indicates what dtypes are supported for inputs in node.args and the second dict {\"name\": [dtypes], ...} indicates what dtypes are supported for inputs in node.kwargs. For inputs in args, if we don't want to check it, we can put None there, e.g. (None, [torch.float]) indicates that we don't care about the type of the first input in args. And for inputs in kwargs, if not listed, will not be checked.", + "type": "class", + "file_path": "pytorch\\torch\\fx\\passes\\operator_support.py", + "ast_data": "ClassDef name:OperatorSupport FunctionDef name:__init__ arg:self arg:support_dict arguments arg arg Assign BoolOp FunctionDef name:is_node_supported arg:self arg:submodules arg:node arguments arg arg arg If Compare Return return:yes Assign Call If Compare Return return:yes If Compare Return return:yes Assign For Call If Compare Call If Compare If Call Assign Call If Compare Return return:yes For Call If Compare If Call Assign Call If Compare Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_valid_dtypes", + "source_code": "def _valid_dtypes(self):\n return set([dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])", + "docstring": "Valid types for loss, variables and gradients. 
Subclasses should override to allow other float types. Returns: Valid types for loss, variables and gradients.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_valid_dtypes arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, canvas, window=None, *, pack_toolbar=True):\n if window is None:\n window = canvas.get_tk_widget().master\n tk.Frame.__init__(self, master=window, borderwidth=2, width=int(canvas.figure.bbox.width), height=50)\n self._buttons = {}\n for text, tooltip_text, image_file, callback in self.toolitems:\n if text is None:\n self._Spacer()\n else:\n self._buttons[text] = button = self._Button(text, str(cbook._get_data_path(f'images/{image_file}.png')), toggle=callback in ['zoom', 'pan'], command=getattr(self, callback))\n if tooltip_text is not None:\n add_tooltip(button, tooltip_text)\n self._label_font = tkinter.font.Font(root=window, size=10)\n label = tk.Label(master=self, font=self._label_font, text='\\xa0\\n\\xa0')\n label.pack(side=tk.RIGHT)\n self.message = tk.StringVar(master=self)\n self._message_label = tk.Label(master=self, font=self._label_font, textvariable=self.message, justify=tk.RIGHT)\n self._message_label.pack(side=tk.RIGHT)\n NavigationToolbar2.__init__(self, canvas)\n if pack_toolbar:\n self.pack(side=tk.BOTTOM, fill=tk.X)", + "docstring": "Parameters ---------- canvas : The figure canvas on which to operate. window : tk.Window The tk.Window which owns this toolbar. pack_toolbar : bool, default: True If True, add the toolbar to the parent's pack manager's packing list during initialization with ``.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\_backend_tk.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:canvas arg:window arguments arg arg arg arg If Compare Assign Call Call Call Assign For If Compare Call Assign Call Call Call Compare Call If Compare Call Assign Call Assign Call Call Assign Call Assign Call Call Call If Call" + }, + { + "library": "pytorch", + "name": "pre_load_state_dict_transform", + "source_code": "@abstractmethod\ndef pre_load_state_dict_transform(self, tensor: torch.Tensor) -> tuple[torch.Tensor, list[Shard]]:\n ...", + "docstring": "This is to be called before loading a *sharded* model state dict and should return the tensor and list of shards from which to load data.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_fsdp_extensions.py", + "ast_data": "FunctionDef name:pre_load_state_dict_transform arg:self arg:tensor arguments arg arg" + }, + { + "library": "tensorflow", + "name": "multinomial", + "source_code": "@tf_export(v1=['random.multinomial', 'multinomial'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated(date=None, instructions='Use `tf.random.categorical` instead.')\ndef multinomial(logits, num_samples, seed=None, name=None, output_dtype=None):\n with ops.name_scope(name, 'multinomial', [logits]):\n return multinomial_categorical_impl(logits, num_samples, output_dtype, seed)", + "docstring": "Draws samples from a multinomial distribution. Example: Args: logits: 2-D Tensor with shape . Each slice represents the unnormalized log-probabilities for all classes. num_samples: 0-D. Number of independent samples to draw for each row slice. seed: A Python integer. Used to create a random seed for the distribution. See for behavior. name: Optional name for the operation. 
output_dtype: The integer type of the output: or . Defaults to . Returns: The drawn samples of shape .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py", + "ast_data": "FunctionDef name:multinomial arg:logits arg:num_samples arg:seed arg:name arg:output_dtype arguments arg arg arg arg arg With Call Return return:yes Call Call Call" + }, + { + "library": "sphinx", + "name": "decorate", + "source_code": "def decorate(self, content: StringList) -> None:\n prepend_prolog(content, self.config.rst_prolog)\n append_epilog(content, self.config.rst_epilog)", + "docstring": "Preprocess reST content before parsing.", + "type": "method", + "file_path": "sphinx\\sphinx\\parsers.py", + "ast_data": "FunctionDef name:decorate arg:self arg:content arguments arg arg Call Call" + }, + { + "library": "matplotlib", + "name": "set_frameon", + "source_code": "def set_frameon(self, b):\n self.patch.set_visible(b)\n self.stale = True", + "docstring": "Set the figure's background patch visibility, i.e. whether the figure background will be drawn. Equivalent to ``. Parameters ---------- b : bool", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:set_frameon arg:self arg:b arguments arg arg Call Assign" + }, + { + "library": "django", + "name": "get_permission_required", + "source_code": "def get_permission_required(self):\n if self.permission_required is None:\n raise ImproperlyConfigured(f'{self.__class__.__name__} is missing the permission_required attribute. Define {self.__class__.__name__}.permission_required, or override {self.__class__.__name__}.get_permission_required().')\n if isinstance(self.permission_required, str):\n perms = (self.permission_required,)\n else:\n perms = self.permission_required\n return perms", + "docstring": "Override this method to override the permission_required attribute. Must return an iterable.", + "type": "method", + "file_path": "django\\django\\contrib\\auth\\mixins.py", + "ast_data": "FunctionDef name:get_permission_required arg:self arguments arg If Compare Raise Call If Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_SparseReduceSumGrad", + "source_code": "@ops.RegisterGradient('SparseReduceSum')\ndef _SparseReduceSumGrad(op: ops.Operation, out_grad):\n sp_indices = op.inputs[0]\n sp_shape = op.inputs[2]\n output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])\n out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)\n scale = sp_shape // math_ops.cast(output_shape_kept_dims, dtypes.int64)\n return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale), None, None)", + "docstring": "Similar to gradient for the Sum Op (i.e. tf.reduce_sum()).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_grad.py", + "ast_data": "FunctionDef name:_SparseReduceSumGrad arg:op arg:out_grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "_update_raw", + "source_code": "def _update_raw(self, other_params):\n if isinstance(other_params, RcParams):\n other_params = dict.items(other_params)\n dict.update(self, other_params)", + "docstring": "Directly update the data from *other_params*, bypassing deprecation, backend and validation logic on both sides. 
This `.RcParams` The input mapping from which to update.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\__init__.py", + "ast_data": "FunctionDef name:_update_raw arg:self arg:other_params arguments arg arg If Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "return_first_non_observer_node", + "source_code": "def return_first_non_observer_node(node: Node, gm: GraphModule) -> Node:\n if node.op == 'call_module':\n node_obj = getattr_from_fqn(gm, node.target)\n if _is_activation_post_process(node_obj):\n assert len(node.args) == 1\n assert isinstance(node.args[0], Node)\n node = node.args[0]\n assert isinstance(node.target, str)\n node_obj = getattr_from_fqn(gm, node.target)\n if _is_activation_post_process(node_obj):\n assert len(node.args) == 1\n assert isinstance(node.args[0], Node)\n node = node.args[0]\n return node", + "docstring": "If node is not an observer, returns it. If node is an observer, navigates up the graph and returns the first parent which is not an observer. For example, graph: (node_non_obs), node = node_non_obs : returns node_non_obs graph: (node_non_obs -> obs0), node = obs0 : returns node_non_obs graph: (node_non_obs -> obs0 -> fq0), node = fq0 : returns node_non_obs", + "type": "function", + "file_path": "pytorch\\torch\\ao\\ns\\fx\\utils.py", + "ast_data": "FunctionDef name:return_first_non_observer_node arg:node arg:gm arguments arg arg If Compare Assign Call If Call Compare Call Call Assign Call Assign Call If Call Compare Call Call Assign Return return:yes" + }, + { + "library": "cryptography", + "name": "tag", + "source_code": "@property\n@abc.abstractmethod\ndef tag(self) -> bytes:\n pass", + "docstring": "Returns tag bytes. This is only available after encryption is finalized.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\base.py", + "ast_data": "FunctionDef name:tag arg:self arguments arg" + }, + { + "library": "pandas", + "name": "is_view", + "source_code": "@property\ndef is_view(self) -> bool:\n return self.values.base is not None", + "docstring": "return a boolean if I am possibly a view", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "FunctionDef name:is_view arg:self arguments arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "sign", + "source_code": "@tf_export('math.sign', 'sign')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef sign(x, name=None):\n x = ops.convert_to_tensor(x)\n if x.dtype.is_complex:\n return gen_math_ops.div_no_nan(x, cast(gen_math_ops.complex_abs(x, Tout=dtypes.float32 if x.dtype == dtypes.complex64 else dtypes.float64), dtype=x.dtype), name=name)\n return gen_math_ops.sign(x, name=name)", + "docstring": "Returns an element-wise indication of the sign of a number. . For complex numbers, . Example usage: >>> # real number >>> tf.math.sign([0., 2., -3.]) >>> # complex number >>> tf.math.sign([1 + 1j, 0 + 0j]) Args: x: A Tensor. Must be one of the following types: bfloat16, half, float32, float64, int32, int64, complex64, complex128. name: A name for the operation (optional). Returns: A Tensor. Has the same type as x. 
If x is a SparseTensor, returns SparseTensor(x.indices, tf.math.sign(x.values, ...), x.dense_shape).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:sign arg:x arg:name arguments arg arg Assign Call If Return return:yes Call Call Call Compare Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "FormatStrFormatter", + "source_code": "class FormatStrFormatter(Formatter):\n\n def __init__(self, fmt):\n self.fmt = fmt\n\n def __call__(self, x, pos=None):\n return self.fmt % x", + "docstring": "Use an old-style ('%' operator) format string to format the tick. The format string should have a single variable format (%) in it. It will be applied to the value (not the position) of the tick. Negative numeric values (e.g., -1) will use a dash, not a Unicode minus; use mathtext to get a Unicode minus by wrapping the format specifier with $ (e.g. \"$%g$\").", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "ClassDef name:FormatStrFormatter FunctionDef name:__init__ arg:self arg:fmt arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Return return:yes" + }, + { + "library": "kornia", + "name": "inverse", + "source_code": "def inverse(self) -> So3:\n return So3(self.q.conj())", + "docstring": "Return the inverse transformation. Example: >>> s = So3.identity() >>> s.inverse() Parameter containing: tensor([1., -0., -0., -0.], requires_grad=True)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py", + "ast_data": "FunctionDef name:inverse arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "bottom_hat", + "source_code": "def bottom_hat(tensor: torch.Tensor, kernel: torch.Tensor, structuring_element: Optional[torch.Tensor]=None, origin: Optional[List[int]]=None, border_type: str='geodesic', border_value: float=0.0, max_val: float=10000.0, engine: str='unfold') -> torch.Tensor:\n if not isinstance(tensor, torch.Tensor):\n raise TypeError(f'Input type is not a torch.Tensor. Got {type(tensor)}')\n if len(tensor.shape) != 4:\n raise ValueError(f'Input size must have 4 dimensions. Got {tensor.dim()}')\n if not isinstance(kernel, torch.Tensor):\n raise TypeError(f'Kernel type is not a torch.Tensor. Got {type(kernel)}')\n if len(kernel.shape) != 2:\n raise ValueError(f'Kernel size must have 2 dimensions. Got {kernel.dim()}')\n return closing(tensor, kernel=kernel, structuring_element=structuring_element, origin=origin, border_type=border_type, border_value=border_value, max_val=max_val, engine=engine) - tensor", + "docstring": "Return the bottom hat transformation of an image. .. image:: _static/img/bottom_hat.png That means, (closed_image - image) applying the same kernel in each channel. The kernel must have 2 dimensions. See :func: for details. Args: tensor: Image with shape :math:. kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give the set of neighbors of the center over which the operation is applied. Its shape is :math:. For full structural elements use torch.ones_like(structural_element). structuring_element: Structuring element used for the grayscale dilation. It may be a non-flat structuring element. origin: Origin of the structuring element. Default: `(B, C, H, W)here `__. 
Example: >>> tensor = torch.rand(1, 3, 5, 5) >>> kernel = torch.ones(3, 3) >>> bottom_hat_img = bottom_hat(tensor, kernel)", + "type": "function", + "file_path": "kornia\\kornia\\morphology\\morphology.py", + "ast_data": "FunctionDef name:bottom_hat arg:tensor arg:kernel arg:structuring_element arg:origin arg:border_type arg:border_value arg:max_val arg:engine arguments arg arg arg arg arg arg arg arg If Call Raise Call Call If Compare Call Raise Call Call If Call Raise Call Call If Compare Call Raise Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "Hardtanh", + "source_code": "class Hardtanh(Module):\n __constants__ = ['min_val', 'max_val', 'inplace']\n min_val: float\n max_val: float\n inplace: bool\n\n def __init__(self, min_val: float=-1.0, max_val: float=1.0, inplace: bool=False, min_value: Optional[float]=None, max_value: Optional[float]=None) -> None:\n super().__init__()\n if min_value is not None:\n warnings.warn('keyword argument `min_value` is deprecated and rename to `min_val`', FutureWarning, stacklevel=2)\n min_val = min_value\n if max_value is not None:\n warnings.warn('keyword argument `max_value` is deprecated and rename to `max_val`', FutureWarning, stacklevel=2)\n max_val = max_value\n self.min_val = min_val\n self.max_val = max_val\n self.inplace = inplace\n assert self.max_val > self.min_val\n\n def forward(self, input: Tensor) -> Tensor:\n return F.hardtanh(input, self.min_val, self.max_val, self.inplace)\n\n def extra_repr(self) -> str:\n inplace_str = ', inplace=True' if self.inplace else ''\n return f'min_val={self.min_val}, max_val={self.max_val}{inplace_str}'", + "docstring": "Applies the HardTanh function element-wise. HardTanh is defined as: .. math:: \\text{HardTanh}(x) = \\begin{cases} \\text{max\\_val} & \\text{ if } x > \\text{ max\\_val } \\\\ \\text{min\\_val} & \\text{ if } x >> m = nn.Hardtanh(-2, 2) >>> input = torch.randn(2) >>> output = m(input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\activation.py", + "ast_data": "ClassDef name:Hardtanh Assign FunctionDef name:__init__ arg:self arg:min_val arg:max_val arg:inplace arg:min_value arg:max_value arguments arg arg arg arg arg arg Call Call If Compare Call Assign If Compare Call Assign Assign Assign Assign Compare FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes" + }, + { + "library": "pandas", + "name": "column_data_lengths", + "source_code": "def column_data_lengths(self) -> np.ndarray:\n return np.asarray(self._column_data_lengths, dtype=np.int64)", + "docstring": "Return a numpy int64 array of the column data lengths", + "type": "method", + "file_path": "pandas\\pandas\\io\\sas\\sas7bdat.py", + "ast_data": "FunctionDef name:column_data_lengths arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "RelaxedOneHotCategorical", + "source_code": "class RelaxedOneHotCategorical(TransformedDistribution):\n arg_constraints = {'probs': constraints.simplex, 'logits': constraints.real_vector}\n support = constraints.simplex\n has_rsample = True\n base_dist: ExpRelaxedCategorical\n\n def __init__(self, temperature: Tensor, probs: Optional[Tensor]=None, logits: Optional[Tensor]=None, validate_args: Optional[bool]=None) -> None:\n base_dist = ExpRelaxedCategorical(temperature, probs, logits, validate_args=validate_args)\n super().__init__(base_dist, ExpTransform(), validate_args=validate_args)\n\n def expand(self, 
batch_shape, _instance=None):\n new = self._get_checked_instance(RelaxedOneHotCategorical, _instance)\n return super().expand(batch_shape, _instance=new)\n\n @property\n def temperature(self) -> Tensor:\n return self.base_dist.temperature\n\n @property\n def logits(self) -> Tensor:\n return self.base_dist.logits\n\n @property\n def probs(self) -> Tensor:\n return self.base_dist.probs", + "docstring": "Creates a RelaxedOneHotCategorical distribution parametrized by :attr:, and either :attr: or :attr:. This is a relaxed version of the :class: distribution, so its samples are on simplex, and are reparametrizable. Example:: >>> # xdoctest: +IGNORE_WANT(\"non-deterministic\") >>> m = RelaxedOneHotCategorical(torch.tensor([2.2]), ... torch.tensor([0.1, 0.2, 0.3, 0.4])) >>> m.sample() tensor([ 0.1294, 0.2324, 0.3859, 0.2523]) Args: temperature (Tensor): relaxation temperature probs (Tensor): event probabilities logits (Tensor): unnormalized log probability for each event", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\relaxed_categorical.py", + "ast_data": "ClassDef name:RelaxedOneHotCategorical Assign Assign Assign FunctionDef name:__init__ arg:self arg:temperature arg:probs arg:logits arg:validate_args arguments arg arg arg arg arg Assign Call Call Call Call FunctionDef name:expand arg:self arg:batch_shape arg:_instance arguments arg arg arg Assign Call Return return:yes Call Call FunctionDef name:temperature arg:self arguments arg Return return:yes FunctionDef name:logits arg:self arguments arg Return return:yes FunctionDef name:probs arg:self arguments arg Return return:yes" + }, + { + "library": "pygame", + "name": "rmpath", + "source_code": "def rmpath(path: Path, verbose: bool=False):\n if path.is_symlink():\n if verbose:\n print(f\"- Removing existing symlink at '{path}'\")\n path.unlink()\n elif path.is_file():\n if verbose:\n print(f\"- Removing existing file at '{path}'\")\n path.unlink()\n elif path.is_dir():\n if verbose:\n print(f\"- Removing existing directory at '{path}'\")\n shutil.rmtree(path)", + "docstring": "Tries to remove a path of any kind", + "type": "function", + "file_path": "pygame\\buildconfig\\macdependencies\\install_mac_deps.py", + "ast_data": "FunctionDef name:rmpath arg:path arg:verbose arguments arg arg If Call If Call Call If Call If Call Call If Call If Call Call" + }, + { + "library": "tensorflow", + "name": "_copy_trackable_to_cpu", + "source_code": "def _copy_trackable_to_cpu(self, object_map):\n if self not in object_map:\n op_device = pydev.DeviceSpec.from_string(self.device).replace(device_type='CPU', device_index=0).to_string()\n with ops.device(op_device):\n new_var = UninitializedVariable(trainable=self.trainable, shape=self.shape, dtype=self.dtype, name=self._shared_name)\n object_map[self] = new_var\n destination_var = object_map[self]\n with ops.device(destination_var.device):\n destination_var.assign(self.read_value())", + "docstring": "For implementing .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", + "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg If Compare Assign Call Call Call With Call Assign Call Assign Assign With Call Call Call" + }, + { + "library": "scipy", + "name": "_fractional_power_superdiag_entry", + "source_code": "def _fractional_power_superdiag_entry(l1, l2, t12, p):\n if l1 == l2:\n f12 = t12 * p * l1 ** (p - 1)\n elif abs(l2 - l1) > abs(l1 + l2) / 2:\n f12 = t12 * (l2 ** p - l1 ** p) / (l2 - l1)\n else:\n 
z = (l2 - l1) / (l2 + l1)\n log_l1 = np.log(l1)\n log_l2 = np.log(l2)\n arctanh_z = np.arctanh(z)\n tmp_a = t12 * np.exp(p / 2 * (log_l2 + log_l1))\n tmp_u = _unwindk(log_l2 - log_l1)\n if tmp_u:\n tmp_b = p * (arctanh_z + np.pi * 1j * tmp_u)\n else:\n tmp_b = p * arctanh_z\n tmp_c = 2 * np.sinh(tmp_b) / (l2 - l1)\n f12 = tmp_a * tmp_c\n return f12", + "docstring": "Compute a superdiagonal entry of a fractional matrix power. This is Eq. (5.6) in [1]_. Parameters ---------- l1 : complex A diagonal entry of the matrix. l2 : complex A diagonal entry of the matrix. t12 : complex A superdiagonal entry of the matrix. p : float A fractional power. Returns ------- f12 : complex A superdiagonal entry of the fractional matrix power. Notes ----- Care has been taken to return a real number if possible when all of the inputs are real numbers. References ---------- .. [1] Nicholas J. Higham and Lijing lin (2011) \"A Schur-Pade Algorithm for Fractional Powers of a Matrix.\" SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. ISSN 0895-4798", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_matfuncs_inv_ssq.py", + "ast_data": "FunctionDef name:_fractional_power_superdiag_entry arg:l1 arg:l2 arg:t12 arg:p arguments arg arg arg arg If Compare Assign If Compare Call Call Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call If Assign Assign Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "mode", + "source_code": "@property\ndef mode(self):\n return self.__mode", + "docstring": "Returns the mode in which the file was opened.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", + "ast_data": "FunctionDef name:mode arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "RoutedDecoderIterDataPipe", + "source_code": "@functional_datapipe('routed_decode')\nclass RoutedDecoderIterDataPipe(IterDataPipe[tuple[str, Any]]):\n\n def __init__(self, datapipe: Iterable[tuple[str, BufferedIOBase]], *handlers: Callable, key_fn: Callable=extension_extract_fn) -> None:\n super().__init__()\n self.datapipe: Iterable[tuple[str, BufferedIOBase]] = datapipe\n if not handlers:\n handlers = (decoder_basichandlers, decoder_imagehandler('torch'))\n self.decoder = Decoder(*handlers, key_fn=key_fn)\n _deprecation_warning(type(self).__name__, deprecation_version='1.12', removal_version='1.13', old_functional_name='routed_decode')\n\n def add_handler(self, *handler: Callable) -> None:\n self.decoder.add_handler(*handler)\n\n def __iter__(self) -> Iterator[tuple[str, Any]]:\n for data in self.datapipe:\n pathname = data[0]\n result = self.decoder(data)\n yield (pathname, result[pathname])\n\n def __len__(self) -> int:\n if isinstance(self.datapipe, Sized):\n return len(self.datapipe)\n raise TypeError(f\"{type(self).__name__} instance doesn't have valid length\")", + "docstring": "Decodes binary streams from input DataPipe, yields pathname and decoded data in a tuple. (functional name: `` is specified returning anything other than extension, the default handler will not work and users need to specify custom handler. 
Custom handler could use regex to determine the eligibility to handle data.", + "type": "class", + "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\routeddecoder.py", + "ast_data": "ClassDef name:RoutedDecoderIterDataPipe FunctionDef name:__init__ arg:self arg:datapipe arguments arg arg arg arg Call Call If Assign Call Assign Call Call Call FunctionDef name:add_handler arg:self arguments arg arg Call FunctionDef name:__iter__ arg:self arguments arg For Assign Assign Call FunctionDef name:__len__ arg:self arguments arg If Call Return return:yes Call Raise Call Call Call" + }, + { + "library": "pandas", + "name": "dates", + "source_code": "def dates(self, start_date, end_date, return_name: bool=False) -> Series | DatetimeIndex:\n start_date = Timestamp(start_date)\n end_date = Timestamp(end_date)\n filter_start_date = start_date\n filter_end_date = end_date\n if self.year is not None:\n dt = Timestamp(datetime(self.year, self.month, self.day))\n dti = DatetimeIndex([dt])\n if return_name:\n return Series(self.name, index=dti)\n else:\n return dti\n dates = self._reference_dates(start_date, end_date)\n holiday_dates = self._apply_rule(dates)\n if self.days_of_week is not None:\n holiday_dates = holiday_dates[np.isin(holiday_dates.dayofweek, self.days_of_week).ravel()]\n if self.start_date is not None:\n filter_start_date = max(self.start_date.tz_localize(filter_start_date.tz), filter_start_date)\n if self.end_date is not None:\n filter_end_date = min(self.end_date.tz_localize(filter_end_date.tz), filter_end_date)\n holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date)]\n if return_name:\n return Series(self.name, index=holiday_dates)\n return holiday_dates", + "docstring": "Calculate holidays observed between start date and end date Parameters ---------- start_date : starting date, datetime-like, optional end_date : ending date, datetime-like, optional return_name : bool, optional, default=False If True, return a series that has dates and holiday names. False will only return dates. 
Returns ------- Series or DatetimeIndex Series if return_name is True", + "type": "method", + "file_path": "pandas\\pandas\\tseries\\holiday.py", + "ast_data": "FunctionDef name:dates arg:self arg:start_date arg:end_date arg:return_name arguments arg arg arg arg Assign Call Assign Call Assign Assign If Compare Assign Call Call Assign Call If Return return:yes Call Return return:yes Assign Call Assign Call If Compare Assign Call Call If Compare Assign Call Call If Compare Assign Call Call Assign Compare Compare If Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "pip_install", + "source_code": "@timed('Installing packages')\ndef pip_install(self, *packages: str, prerelease: bool=False, upgrade: bool=False, **popen_kwargs: Any) -> subprocess.CompletedProcess[str]:\n if upgrade:\n args = ['--upgrade', *packages]\n verb = 'Upgrading'\n else:\n args = list(packages)\n verb = 'Installing'\n if prerelease:\n args = ['--pre', *args]\n print(f'{verb} package(s) ({self.pip_source.index_url}): {', '.join(map(os.path.basename, packages))}')\n return self.pip('install', *args, **popen_kwargs)", + "docstring": "Run a pip install command in the virtual environment.", + "type": "method", + "file_path": "pytorch\\tools\\nightly.py", + "ast_data": "FunctionDef name:pip_install arg:self arguments arg arg arg arg arg If Assign Assign Assign Call Assign If Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "GraphInputMatcher", + "source_code": "@dataclasses.dataclass\nclass GraphInputMatcher:\n tensor_id_to_arg_idx: dict[int, int]\n graph_input_tensor_ids: list[int]\n graph_input_ivalues: list[Any]\n\n def __call__(self, args):\n real_input = []\n for tensor_id, traced_ivalue in zip(self.graph_input_tensor_ids, self.graph_input_ivalues):\n arg_idx = self.tensor_id_to_arg_idx.get(tensor_id, None)\n if arg_idx is None:\n inp = traced_ivalue\n else:\n inp = args[arg_idx]\n real_input.append(inp)\n return real_input", + "docstring": "The GraphInputMatcher class setup the graph inputs for future calls after lazy tracing. Specifically, those graph inputs corresponding to method parameters should be replaced with the arguments for the current call. tensor_id_to_arg_idx maps the tensor id to the parameter index. graph_input_tensor_ids, graph_input_ivalues list the tensor_id and ivalue for each of the TS/XLA graph inputs.", + "type": "class", + "file_path": "pytorch\\torch\\_lazy\\extract_compiled_graph.py", + "ast_data": "ClassDef name:GraphInputMatcher FunctionDef name:__call__ arg:self arg:args arguments arg arg Assign For Call Assign Call If Compare Assign Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "arange_sequence", + "source_code": "def arange_sequence(ranges: Tensor) -> Tensor:\n maxcnt = torch.max(ranges).item()\n numuni = ranges.shape[0]\n complete_ranges = torch.arange(maxcnt, device=ranges.device).unsqueeze(0).expand(numuni, -1)\n return complete_ranges[complete_ranges < ranges.unsqueeze(-1)]", + "docstring": "Return a sequence of the ranges specified by the argument. 
Example: [2, 5, 1, 2] -> [0, 1, 0, 1, 2, 3, 4, 0, 0, 1]", + "type": "function", + "file_path": "kornia\\kornia\\feature\\adalam\\utils.py", + "ast_data": "FunctionDef name:arange_sequence arg:ranges arguments arg Assign Call Call Assign Assign Call Call Call Return return:yes Compare Call" + }, + { + "library": "pytorch", + "name": "CeilDiv", + "source_code": "class CeilDiv(sympy.Function):\n is_integer = True\n\n def __new__(cls, base, divisor):\n base = sympy.sympify(base)\n divisor = sympy.sympify(divisor)\n if sympy.gcd(base, divisor) == divisor:\n return CleanDiv(base, divisor)\n else:\n return FloorDiv(base + (divisor - 1), divisor)", + "docstring": "Div used in indexing that rounds up.", + "type": "class", + "file_path": "pytorch\\torch\\utils\\_sympy\\functions.py", + "ast_data": "ClassDef name:CeilDiv Assign FunctionDef name:__new__ arg:cls arg:base arg:divisor arguments arg arg arg Assign Call Assign Call If Compare Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "settings", + "source_code": "@property\ndef settings(self) -> RendezvousSettings:\n return self._settings", + "docstring": "Get the settings of the rendezvous.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py", + "ast_data": "FunctionDef name:settings arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "script_if_tracing", + "source_code": "def script_if_tracing(fn):\n return _script_if_tracing(fn)", + "docstring": "Compiles `ScriptFunctiontorch.jit.scriptfn` is returned.", + "type": "function", + "file_path": "pytorch\\torch\\jit\\__init__.py", + "ast_data": "FunctionDef name:script_if_tracing arg:fn arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "cpu_device_name_at_coordinates", + "source_code": "def cpu_device_name_at_coordinates(self, device_coordinates, job=None):\n return _tpu_host_device_name(job, self._topology_tasks[tuple(device_coordinates)])", + "docstring": "Returns the CPU device attached to a logical core.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py", + "ast_data": "FunctionDef name:cpu_device_name_at_coordinates arg:self arg:device_coordinates arg:job arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pygame", + "name": "set_timing_treshold", + "source_code": "def set_timing_treshold(self, time_ms):\n warn('This function will be removed, use set_timing_threshold function instead', DeprecationWarning)\n self.set_timing_threshold(time_ms)", + "docstring": "set the threshold in milliseconds set_timing_treshold(time_ms): return None Defaults to 1000.0 / 80.0. This means that the screen will be painted using the flip method rather than the update method if the update method is taking so long to update the screen that the frame rate falls below 80 frames per second. Raises TypeError if time_ms is not int or float.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:set_timing_treshold arg:self arg:time_ms arguments arg arg Call Call" + }, + { + "library": "pandas", + "name": "reformat_hist_y_given_by", + "source_code": "def reformat_hist_y_given_by(y: np.ndarray, by: IndexLabel | None) -> np.ndarray:\n if by is not None and len(y.shape) > 1:\n return np.array([remove_na_arraylike(col) for col in y.T]).T\n return remove_na_arraylike(y)", + "docstring": "Internal function to reformat y given is applied or not for hist plot. 
If by is None, input y is 1-d with NaN removed; and if by is not None, groupby will take place and input y is multi-dimensional array.", + "type": "function", + "file_path": "pandas\\pandas\\plotting\\_matplotlib\\groupby.py", + "ast_data": "FunctionDef name:reformat_hist_y_given_by arg:y arg:by arguments arg arg If BoolOp Compare Compare Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "RestoredOptimizer", + "source_code": "class RestoredOptimizer(OptimizerV2):\n\n def __init__(self):\n super(RestoredOptimizer, self).__init__('RestoredOptimizer')\n self._hypers_created = True\n\n def get_config(self):\n raise NotImplementedError('Restoring functional Optimizers from SavedModels is not currently supported. Please file a feature request if this limitation bothers you.')", + "docstring": "A non-functional Optimizer implementation for checkpoint compatibility. Holds slot variables and hyperparameters when an optimizer is restored from a SavedModel. These variables may be referenced in functions along with ops created by the original optimizer, but currently we do not support using the optimizer object itself (e.g. through ).", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", + "ast_data": "ClassDef name:RestoredOptimizer FunctionDef name:__init__ arg:self arguments arg Call Call Assign FunctionDef name:get_config arg:self arguments arg Raise Call" + }, + { + "library": "pytorch", + "name": "arg_name", + "source_code": "def arg_name(self, name: str) -> Optional[str]:\n inplaced = self.inplace_buffers.get(name, None)\n if inplaced is not None and (not isinstance(inplaced, RemovedArg)):\n return inplaced.inner_name\n output_name = self.output_buffers.get(name, None)\n if output_name is not None and (not isinstance(output_name, RemovedArg)):\n return output_name\n return self.input_buffers.get(name, None)", + "docstring": "Returns inner name of a given outer name.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py", + "ast_data": "FunctionDef name:arg_name arg:self arg:name arguments arg arg Assign Call If BoolOp Compare Call Return return:yes Assign Call If BoolOp Compare Call Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "device", + "source_code": "@property\ndef device(self):\n return self.values.device", + "docstring": "The name of the device on which will be produced, or .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py", + "ast_data": "FunctionDef name:device arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "type", + "source_code": "@property\ndef type(self) -> type_t[Any]:\n raise AbstractMethodError(self)", + "docstring": "The scalar type for the array, e.g. 
`type`.", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\base.py", + "ast_data": "FunctionDef name:type arg:self arguments arg Raise Call" + }, + { + "library": "pytorch", + "name": "qualified_module_class_name", + "source_code": "@property\ndef qualified_module_class_name(self) -> str:\n return self.top().qualified_module_class_name", + "docstring": "Returns the qualified module class name of the top module.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py", + "ast_data": "FunctionDef name:qualified_module_class_name arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='log_cosh'):\n super().__init__(log_cosh, name=name, reduction=reduction)", + "docstring": "Initializes instance. Args: reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. Defaults to 'log_cosh'.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:reduction arg:name arguments arg arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "_get_error_from_remote_values", + "source_code": "def _get_error_from_remote_values(structure):\n errors_in_structure = []\n\n def _get_error(val):\n if isinstance(val, RemoteValue):\n error = val._get_error()\n if error:\n errors_in_structure.append(error)\n nest.map_structure(_get_error, structure)\n if errors_in_structure:\n return errors_in_structure[0]\n else:\n return None", + "docstring": "Attempts to return errors from s. Rebuilds them if needed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:_get_error_from_remote_values arg:structure arguments arg Assign FunctionDef name:_get_error arg:val arguments arg If Call Assign Call If Call Call If Return return:yes Return return:no" + }, + { + "library": "matplotlib", + "name": "draw", + "source_code": "def draw() -> None:\n gcf().canvas.draw_idle()", + "docstring": "Redraw the current figure. This is used to update a figure that has been altered, but not automatically re-drawn. If interactive mode is on (via ), this should be only rarely needed, but there may be ways to modify the state of a figure without marking it as \"stale\". Please report these cases as bugs. This is equivalent to calling `` is the current figure. See Also -------- .FigureCanvasBase.draw_idle .FigureCanvasBase.draw", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:draw arguments Call Call" + }, + { + "library": "numpy", + "name": "arctanh", + "source_code": "@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef arctanh(x):\n x = _fix_real_abs_gt_1(x)\n return nx.arctanh(x)", + "docstring": "Compute the inverse hyperbolic tangent of . Return the \"principal value\" (for a description of this, see ) of `x, or if is complex, the result is complex. Finally, returns`xxoutxnumpy.arctanh`). 
Examples -------- >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.arctanh(0.5) 0.5493061443340549 >>> from numpy.testing import suppress_warnings >>> with suppress_warnings() as sup: ... sup.filter(RuntimeWarning) ... np.emath.arctanh(np.eye(2)) array([[inf, 0.], [ 0., inf]]) >>> np.emath.arctanh([1j]) array([0.+0.7854j])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_scimath_impl.py", + "ast_data": "FunctionDef name:arctanh arg:x arguments arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "__eq__", + "source_code": "def __eq__(self, other: object) -> ArrayLike:\n raise AbstractMethodError(self)", + "docstring": "Return for (element-wise equality).", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "ivalue_type_conversion_method", + "source_code": "def ivalue_type_conversion_method(arg_type: BaseType | OptionalType | Type) -> tuple[bool, str] | None:\n type_conversion_methods = {BaseTy.Tensor: ((True, 'toTensor()'), (False, 'toOptional()')), BaseTy.int: ((False, 'toInt()'), (False, 'toOptional()')), BaseTy.bool: ((False, 'toBool()'), (False, 'toOptional()')), BaseTy.Scalar: ((False, 'toScalar()'), (False, 'toOptional()')), BaseTy.ScalarType: ((False, 'toScalarType()'), (False, 'toOptional()')), BaseTy.str: ((False, 'toStringView()'), (False, 'toOptional()'), (False, 'toOptional<::std::string_view>()'))}\n base_ty_object = None\n if isinstance(arg_type, BaseType):\n base_ty_object = arg_type.name\n elif isinstance(arg_type, OptionalType):\n if not isinstance(arg_type.elem, BaseType):\n return None\n base_ty_object = arg_type.elem.name\n else:\n return None\n if base_ty_object not in type_conversion_methods:\n return None\n methods = type_conversion_methods[base_ty_object]\n if isinstance(arg_type, BaseType):\n return methods[0]\n return methods[1]", + "docstring": "Return the method call expression of arg_typearg_type` == BaseTy.Tensor, this function returns \".toTensor()\", so that it can be appended to the ivalue's variable name to get the value of the expected type.", + "type": "function", + "file_path": "pytorch\\torchgen\\static_runtime\\generator.py", + "ast_data": "FunctionDef name:ivalue_type_conversion_method arg:arg_type arguments arg Assign Assign If Call Assign If Call If Call Return return:no Assign Return return:no If Compare Return return:no Assign If Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "with_dtype", + "source_code": "def with_dtype(self, dtype: dtypes.DType) -> 'DynamicRaggedShape.Spec':\n new_rp_specs = [rp.with_dtype(dtype) for rp in self._row_partitions]\n return DynamicRaggedShape.Spec(row_partitions=new_rp_specs, static_inner_shape=self._static_inner_shape, dtype=dtype)", + "docstring": "Return the same spec, but with a different DType.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:with_dtype arg:self arg:dtype arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "rotated", + "source_code": "def rotated(self, radians):\n corners = self.corners()\n corners_rotated = Affine2D().rotate(radians).transform(corners)\n bbox = Bbox.unit()\n bbox.update_from_data_xy(corners_rotated, ignore=True)\n return bbox", + "docstring": "Return the axes-aligned 
bounding box that bounds the result of rotating this by an angle of *radians*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:rotated arg:self arg:radians arguments arg arg Assign Call Assign Call Call Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "close_if_unusable_or_obsolete", + "source_code": "def close_if_unusable_or_obsolete(self):\n if self.connection is not None:\n self.health_check_done = False\n if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:\n self.close()\n return\n if self.errors_occurred:\n if self.is_usable():\n self.errors_occurred = False\n self.health_check_done = True\n else:\n self.close()\n return\n if self.close_at is not None and time.monotonic() >= self.close_at:\n self.close()\n return", + "docstring": "Close the current connection if unrecoverable errors have occurred or if it outlived its maximum age.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\base.py", + "ast_data": "FunctionDef name:close_if_unusable_or_obsolete arg:self arguments arg If Compare Assign If Compare Call Call Return return:no If If Call Assign Assign Call Return return:no If BoolOp Compare Compare Call Call Return return:no" + }, + { + "library": "pandas", + "name": "round", + "source_code": "def round(self, decimals: int=0, *args, **kwargs) -> Series:\n nv.validate_round(args, kwargs)\n if self.dtype == 'object':\n raise TypeError('Expected numeric dtype, got object instead.')\n new_mgr = self._mgr.round(decimals=decimals)\n return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self, method='round')", + "docstring": "Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Series.dt.round : Round values of data to the specified freq. Notes ----- For values exactly halfway between rounded decimal values, pandas rounds to the nearest even value (e.g. -0.5 and 0.5 round to 0.0, 1.5 and 2.5 round to 2.0, etc.). Examples -------- >>> s = pd.Series([-0.5, 0.1, 2.5, 1.3, 2.7]) >>> s.round() 0 -0.0 1 0.0 2 2.0 3 1.0 4 3.0 dtype: float64", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:round arg:self arg:decimals arguments arg arg arg arg Call If Compare Raise Call Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "is_valid_im", + "source_code": "@xp_capabilities(warnings=[('dask.array', 'see notes'), ('jax.numpy', 'see notes')])\ndef is_valid_im(R, warning=False, throw=False, name=None):\n xp = array_namespace(R)\n R = _asarray(R, xp=xp)\n return _is_valid_im(R, warning=warning, throw=throw, name=name, materialize=True, xp=xp)", + "docstring": "Return True if the inconsistency matrix passed is valid. It must be a :math: by 4 array of doubles. 
The standard deviations `n-1scipy.cluster.hierarchy.inconsistentscipy.cluster.hierarchy.is_valid_im` is wrongly constructed (e.g., one of the standard deviations is set to a negative value), then the check will fail: >>> R[-1,1] = R[-1,1] * -1 >>> is_valid_im(R) False", + "type": "function", + "file_path": "scipy\\scipy\\cluster\\hierarchy.py", + "ast_data": "FunctionDef name:is_valid_im arg:R arg:warning arg:throw arg:name arguments arg arg arg arg Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "note_dependency", + "source_code": "def note_dependency(self, filename: str | os.PathLike[str], *, docname: str | None=None) -> None:\n if docname is None:\n docname = self.docname\n self.dependencies.setdefault(docname, set()).add(_StrPath(filename))", + "docstring": "Add *filename* as a dependency of the current document. This means that the document will be rebuilt if this file changes. *filename* should be absolute or relative to the source directory.", + "type": "method", + "file_path": "sphinx\\sphinx\\environment\\__init__.py", + "ast_data": "FunctionDef name:note_dependency arg:self arg:filename arguments arg arg arg If Compare Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "host_mesh", + "source_code": "def host_mesh(self) -> 'Mesh':\n return self._host_mesh", + "docstring": "Returns a host mesh.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py", + "ast_data": "FunctionDef name:host_mesh arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_reset_state_wrapper", + "source_code": "def _reset_state_wrapper(self):\n self._reset_state_impl()\n self._is_adapted = False", + "docstring": "Calls and sets to .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py", + "ast_data": "FunctionDef name:_reset_state_wrapper arg:self arguments arg Call Assign" + }, + { + "library": "tensorflow", + "name": "_partitioner", + "source_code": "def _partitioner(shape, dtype):\n if axis >= len(shape):\n raise ValueError(f'Cannot partition variable along axis {axis} when shape is only {shape}')\n dtype = dtypes.as_dtype(dtype)\n if dtype.base_dtype == dtypes.string:\n bytes_per_element = bytes_per_string_element\n else:\n bytes_per_element = dtype.size\n total_size_bytes = shape.num_elements() * bytes_per_element\n partitions = total_size_bytes / min_slice_size\n partitions_list = [1] * len(shape)\n partitions_list[axis] = max(1, min(shape.dims[axis].value, max_partitions, int(math.ceil(partitions))))\n return partitions_list", + "docstring": "Partitioner that partitions list for a variable of given shape and type. Ex: Consider partitioning a variable of type float32 with shape=[1024, 1024]. If >= 16, this function would return [(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1]. If < 16, this function would return [, 1]. Args: shape: Shape of the variable. dtype: Type of the variable. Returns: List of partitions for each axis (currently only one axis can be partitioned). 
Raises: ValueError: If axis to partition along does not exist for the variable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\partitioned_variables.py", + "ast_data": "FunctionDef name:_partitioner arg:shape arg:dtype arguments arg arg If Compare Call Raise Call Assign Call If Compare Assign Assign Assign Call Assign Assign Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "single_method_decorator", + "source_code": "def single_method_decorator(f):\n\n @parameterized.named_parameters(*params)\n @functools.wraps(f)\n def decorated(self, saved_format, *args, **kwargs):\n if saved_format == 'h5':\n _test_h5_saved_model_format(f, self, *args, **kwargs)\n elif saved_format == 'tf':\n _test_tf_saved_model_format(f, self, *args, **kwargs)\n elif saved_format == 'tf_no_traces':\n _test_tf_saved_model_format_no_traces(f, self, *args, **kwargs)\n else:\n raise ValueError('Unknown model type: %s' % (saved_format,))\n return decorated", + "docstring": "Decorator that constructs the test cases.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\keras_parameterized.py", + "ast_data": "FunctionDef name:single_method_decorator arg:f arguments arg FunctionDef name:decorated arg:self arg:saved_format arguments arg arg arg arg If Compare Call If Compare Call If Compare Call Raise Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "DTensorDistributedValue", + "source_code": "class DTensorDistributedValue(values_lib.DistributedValues):\n\n def __init__(self, dtensor):\n if context.executing_eagerly():\n if not d_api.is_dtensor(dtensor):\n raise ValueError(f'The DTensorDistributedValue can only be built with DTensor instance, got {type(dtensor)}')\n super().__init__(d_api.unpack(dtensor))\n else:\n super().__init__([dtensor])\n self._dtensor = dtensor\n\n def get_dtensor(self):\n return self._dtensor\n\n @property\n def values(self):\n return self._values", + "docstring": "DistributedValue backed by a DTensor instance. This class is useful to align the interface between DTensor and tf.distribute. Most of the tf.distribute API will accept/return DistributedValue, whereas DTensor low level API will only accept DTensor instance. In order to avoid the conversion back and forth between DistributedValue and DTensor, we introduce this class so that it can work with both side.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\dtensor_util.py", + "ast_data": "ClassDef name:DTensorDistributedValue FunctionDef name:__init__ arg:self arg:dtensor arguments arg arg If Call If Call Raise Call Call Call Call Call Call Call Assign FunctionDef name:get_dtensor arg:self arguments arg Return return:yes FunctionDef name:values arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "RequestSite", + "source_code": "class RequestSite:\n\n def __init__(self, request):\n self.domain = self.name = request.get_host()\n\n def __str__(self):\n return self.domain\n\n def save(self, force_insert=False, force_update=False):\n raise NotImplementedError('RequestSite cannot be saved.')\n\n def delete(self):\n raise NotImplementedError('RequestSite cannot be deleted.')", + "docstring": "A class that shares the primary interface of Site (i.e., it has `` attributes) but gets its data from an HttpRequest object rather than from a database. 
The save() and delete() methods raise NotImplementedError.", + "type": "class", + "file_path": "django\\django\\contrib\\sites\\requests.py", + "ast_data": "ClassDef name:RequestSite FunctionDef name:__init__ arg:self arg:request arguments arg arg Assign Call FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:save arg:self arg:force_insert arg:force_update arguments arg arg arg Raise Call FunctionDef name:delete arg:self arguments arg Raise Call" + }, + { + "library": "pytorch", + "name": "values", + "source_code": "def values(self) -> Iterable[Any]:\n return (self[k] for k in self._keys)", + "docstring": "Return an iterable of the ParameterDict values.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\container.py", + "ast_data": "FunctionDef name:values arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_worker_id_queue", + "source_code": "def get_worker_id_queue():\n global _WORKER_ID_QUEUE\n if _WORKER_ID_QUEUE is None:\n _WORKER_ID_QUEUE = multiprocessing.Queue()\n return _WORKER_ID_QUEUE", + "docstring": "Lazily create the queue to track worker ids.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py", + "ast_data": "FunctionDef name:get_worker_id_queue arguments If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_import_meta_graph_with_return_elements", + "source_code": "def _import_meta_graph_with_return_elements(meta_graph_or_file, clear_devices=False, import_scope=None, return_elements=None, **kwargs):\n if context.executing_eagerly():\n raise RuntimeError('Exporting/importing meta graphs is not supported when eager execution is enabled. No graph exists when eager execution is enabled.')\n if not isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):\n meta_graph_def = meta_graph.read_meta_graph_file(meta_graph_or_file)\n else:\n meta_graph_def = meta_graph_or_file\n imported_vars, imported_return_elements = meta_graph.import_scoped_meta_graph_with_return_elements(meta_graph_def, clear_devices=clear_devices, import_scope=import_scope, return_elements=return_elements, **kwargs)\n saver = _create_saver_from_imported_meta_graph(meta_graph_def, import_scope, imported_vars)\n return (saver, imported_return_elements)", + "docstring": "Import MetaGraph, and return both a saver and returned elements.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", + "ast_data": "FunctionDef name:_import_meta_graph_with_return_elements arg:meta_graph_or_file arg:clear_devices arg:import_scope arg:return_elements arguments arg arg arg arg arg If Call Raise Call If Call Assign Call Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "_byte_order_str", + "source_code": "def _byte_order_str(dtype):\n swapped = np.dtype(int).newbyteorder('S')\n native = swapped.newbyteorder('S')\n byteorder = dtype.byteorder\n if byteorder == '=':\n return native.byteorder\n if byteorder == 'S':\n return swapped.byteorder\n elif byteorder == '|':\n return ''\n else:\n return byteorder", + "docstring": "Normalize byteorder to ''", + "type": "function", + "file_path": "numpy\\numpy\\_core\\_dtype.py", + "ast_data": "FunctionDef name:_byte_order_str arg:dtype arguments arg Assign Call Call Assign Call Assign If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": 
"_insert_update_blklocs_and_blknos", + "source_code": "def _insert_update_blklocs_and_blknos(self, loc) -> None:\n if loc == self.blklocs.shape[0]:\n self._blklocs = np.append(self._blklocs, 0)\n self._blknos = np.append(self._blknos, len(self.blocks))\n elif loc == 0:\n self._blklocs = np.concatenate([[0], self._blklocs])\n self._blknos = np.concatenate([[len(self.blocks)], self._blknos])\n else:\n new_blklocs, new_blknos = libinternals.update_blklocs_and_blknos(self.blklocs, self.blknos, loc, len(self.blocks))\n self._blklocs = new_blklocs\n self._blknos = new_blknos", + "docstring": "When inserting a new Block at location 'loc', we update our _blklocs and _blknos.", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:_insert_update_blklocs_and_blknos arg:self arg:loc arguments arg arg If Compare Assign Call Assign Call Call If Compare Assign Call Assign Call Call Assign Call Call Assign Assign" + }, + { + "library": "matplotlib", + "name": "set_family", + "source_code": "def set_family(self, family):\n family = mpl._val_or_rc(family, 'font.family')\n if isinstance(family, str):\n family = [family]\n self._family = family", + "docstring": "Change the font family. Can be either an alias (generic name is CSS parlance), such as: 'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace', a real font name or a list of real font names. Real font names are not supported when :rc: is . Default: :rc:", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py", + "ast_data": "FunctionDef name:set_family arg:self arg:family arguments arg arg Assign Call If Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "function", + "source_code": "@doc_controls.do_not_generate_docs\ndef function(inputs, outputs, updates=None, name=None, **kwargs):\n if ops.executing_eagerly_outside_functions():\n if kwargs:\n raise ValueError('Session keyword arguments are not supported during eager execution. You passed: %s' % (kwargs,))\n if updates:\n raise ValueError('`updates` argument is not supported during eager execution. You passed: %s' % (updates,))\n from tensorflow.python.keras import models\n from tensorflow.python.keras.utils import tf_utils\n model = models.Model(inputs=inputs, outputs=outputs)\n wrap_outputs = isinstance(outputs, list) and len(outputs) == 1\n\n def func(model_inputs):\n outs = model(model_inputs)\n if wrap_outputs:\n outs = [outs]\n return tf_utils.sync_to_numpy_or_python_type(outs)\n return func\n if kwargs:\n for key in kwargs:\n if key not in tf_inspect.getfullargspec(session_module.Session.run)[0] and key not in ['inputs', 'outputs', 'updates', 'name']:\n msg = 'Invalid argument \"%s\" passed to K.function with TensorFlow backend' % key\n raise ValueError(msg)\n return GraphExecutionFunction(inputs, outputs, updates=updates, name=name, **kwargs)", + "docstring": "Instantiates a Keras function. Args: inputs: List of placeholder tensors. outputs: List of output tensors. updates: List of update ops. name: String, name of function. **kwargs: Passed to . Returns: Output values as Numpy arrays. 
Raises: ValueError: if invalid kwargs are passed in or if in eager execution.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:function arg:inputs arg:outputs arg:updates arg:name arguments arg arg arg arg arg If Call If Raise Call If Raise Call Assign Call Assign BoolOp Call Compare Call FunctionDef name:func arg:model_inputs arguments arg Assign Call If Assign Return return:yes Call Return return:yes If For If BoolOp Compare Call Compare Assign Raise Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "MultiOutputMixin", + "source_code": "class MultiOutputMixin:\n\n def __sklearn_tags__(self):\n tags = super().__sklearn_tags__()\n tags.target_tags.multi_output = True\n return tags", + "docstring": "Mixin to mark estimators that support multioutput.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\base.py", + "ast_data": "ClassDef name:MultiOutputMixin FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "index", + "source_code": "@cherrypy.expose\ndef index(self):\n return 'Hello world!'", + "docstring": "Produce HTTP response body of hello world app index URI.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut01_helloworld.py", + "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes" + }, + { + "library": "numpy", + "name": "lstrip", + "source_code": "@set_module('numpy.strings')\ndef lstrip(a, chars=None):\n if chars is None:\n return _lstrip_whitespace(a)\n return _lstrip_chars(a, chars)", + "docstring": "For each element in , return a copy with the leading characters removed. Parameters ---------- a : array-like, with `` dtype, depending on input types See Also -------- str.lstrip Examples -------- >>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.lstrip(c, 'a') array(['AaAaA', ' aA ', 'bBABba'], dtype='>> np.strings.lstrip(c, 'A') # leaves c unchanged array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c, '')).all() np.False_ >>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c)).all() np.True_", + "type": "function", + "file_path": "numpy\\numpy\\_core\\strings.py", + "ast_data": "FunctionDef name:lstrip arg:a arg:chars arguments arg arg If Compare Return return:yes Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "_make_class_factory", + "source_code": "@functools.cache\ndef _make_class_factory(mixin_class, fmt, attr_name=None):\n\n @functools.cache\n def class_factory(axes_class):\n if issubclass(axes_class, mixin_class):\n return axes_class\n base_class = axes_class\n\n class subcls(mixin_class, base_class):\n __module__ = mixin_class.__module__\n\n def __reduce__(self):\n return (_picklable_class_constructor, (mixin_class, fmt, attr_name, base_class), self.__getstate__())\n subcls.__name__ = subcls.__qualname__ = fmt.format(base_class.__name__)\n if attr_name is not None:\n setattr(subcls, attr_name, base_class)\n return subcls\n class_factory.__module__ = mixin_class.__module__\n return class_factory", + "docstring": "Return a function that creates picklable classes inheriting from a mixin. 
After :: factory = _make_class_factory(FooMixin, fmt, attr_name) FooAxes = factory(Axes) `type` class always return the same subclass.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:_make_class_factory arg:mixin_class arg:fmt arg:attr_name arguments arg arg arg FunctionDef name:class_factory arg:axes_class arguments arg If Call Return return:yes Assign ClassDef name:subcls Assign FunctionDef name:__reduce__ arg:self arguments arg Return return:yes Call Assign Call If Compare Call Return return:yes Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "diag", + "source_code": "def diag(self, X):\n return self.k1.diag(X) + self.k2.diag(X)", + "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to ; however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Argument to the kernel. Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:diag arg:self arg:X arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "LogSoftmax", + "source_code": "class LogSoftmax(Module):\n __constants__ = ['dim']\n dim: Optional[int]\n\n def __init__(self, dim: Optional[int]=None) -> None:\n super().__init__()\n self.dim = dim\n\n def __setstate__(self, state):\n super().__setstate__(state)\n if not hasattr(self, 'dim'):\n self.dim = None\n\n def forward(self, input: Tensor) -> Tensor:\n return F.log_softmax(input, self.dim, _stacklevel=5)\n\n def extra_repr(self):\n return f'dim={self.dim}'", + "docstring": "Applies the :math: function to an n-dimensional input Tensor. The LogSoftmax formulation can be simplified as: .. math:: \\text{LogSoftmax}(x_{i}) = \\log\\left(\\frac{\\exp(x_i) }{ \\sum_j \\exp(x_j)} \\right) Shape: - Input: :math: where means, any number of additional dimensions - Output: :math:, same shape as the input Args: dim (int): A dimension along which LogSoftmax will be computed. Returns: a Tensor of the same dimension and shape as the input with values in the range [-inf, 0) Examples:: >>> m = nn.LogSoftmax(dim=1) >>> input = torch.randn(2, 3) >>> output = m(input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\activation.py", + "ast_data": "ClassDef name:LogSoftmax Assign FunctionDef name:__init__ arg:self arg:dim arguments arg arg Call Call Assign FunctionDef name:__setstate__ arg:self arg:state arguments arg arg Call Call If Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "set_axis", + "source_code": "def set_axis(self, labels, *, axis: Axis=0, copy: bool | lib.NoDefault=lib.no_default) -> Self:\n self._check_copy_deprecation(copy)\n return self._set_axis_nocheck(labels, axis, inplace=False)", + "docstring": "Assign desired index to given axis. Indexes for%(extended_summary_sub)s row labels can be changed by assigning a list-like or Index. Parameters ---------- labels : list-like, Index The values for the new index. axis : %(axes_single_arg)s, default 0 The axis to update. The value 0 identifies the rows. For this parameter is unused and defaults to 0. 
copy : bool, default False Whether to make a copy of the underlying data. .. note:: The keyword will change behavior in pandas 3.0. __ will be enabled by default, which means that all methods with a keyword will use a lazy copy mechanism to defer the copy and ignore the keyword. The keyword will be removed in a future version of pandas. You can already get the future behavior and improvements through enabling copy on write `` .. deprecated:: 3.0.0 Returns ------- %(klass)s An object of type %(klass)s. See Also -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:set_axis arg:self arg:labels arguments arg arg arg arg Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_update_ctx", + "source_code": "def _update_ctx(self, attrs: DataFrame) -> None:\n if not self.index.is_unique or not self.columns.is_unique:\n raise KeyError('`Styler.apply` and `.map` are not compatible with non-unique index or columns.')\n for cn in attrs.columns:\n j = self.columns.get_loc(cn)\n ser = attrs[cn]\n for rn, c in ser.items():\n if not c or pd.isna(c):\n continue\n css_list = maybe_convert_css_to_tuples(c)\n i = self.index.get_loc(rn)\n self.ctx[i, j].extend(css_list)", + "docstring": "Update the state of the `` for data cells. Collects a mapping of {index_label: [('', ''), ..]}. Parameters ---------- attrs : DataFrame should contain strings of ': ;: ' Whitespace shouldn't matter and the final trailing ';' shouldn't matter.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\style.py", + "ast_data": "FunctionDef name:_update_ctx arg:self arg:attrs arguments arg arg If BoolOp Raise Call For Assign Call Assign For Call If BoolOp Call Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "_pack_tensor", + "source_code": "def _pack_tensor(self, *tensors):\n for tensor in tensors:\n if not isinstance(tensor, (tensor_lib.Tensor, composite_tensor.CompositeTensor, variables.Variable)):\n raise ValueError('Every component to pack onto the ParallelDevice must already be a tensor, got {}. Consider running `tf.constant` or `tf.convert_to_tensor` first on literal values.'.format(tensors))\n with ops.device(self._name):\n return tpu_ops.tpu_replicated_input(inputs=tensors)", + "docstring": "Helper to pack plain-old-tensors, not structures or composites.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\parallel_device\\parallel_device.py", + "ast_data": "FunctionDef name:_pack_tensor arg:self arguments arg arg For If Call Raise Call Call With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_MeanAggregator", + "source_code": "def _MeanAggregator(inputs, segments):\n result = []\n for inputs_i, segments_i in zip(array_ops.split(inputs, inputs.shape[0]), array_ops.split(segments, segments.shape[0])):\n means_i = math_ops.unsorted_segment_mean(inputs_i, segments_i, num_segments=math_ops.reduce_max(segments_i) + 1)\n result.append(array_ops.reshape(array_ops.gather(means_i, segments_i), [-1]))\n return array_ops_stack.stack(result, axis=0)", + "docstring": "Replaces each segment with its mean along the last axis. Specifically, each value in the tensor gets replaced by the mean value computed from the values that belong to the same segment. Args: inputs: A 2-tensor. Aggregation is done over dimension 1. segments: A 2-tensor, same shape as . 
Returns: The result, same shape and type as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py", + "ast_data": "FunctionDef name:_MeanAggregator arg:inputs arg:segments arguments arg arg Assign For Call Call Call Assign Call Call Call Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "split_and_operate", + "source_code": "@final\ndef split_and_operate(self, func, *args, **kwargs) -> list[Block]:\n assert self.ndim == 2 and self.shape[0] != 1\n res_blocks = []\n for nb in self._split():\n rbs = func(nb, *args, **kwargs)\n res_blocks.extend(rbs)\n return res_blocks", + "docstring": "Split the block and apply func column-by-column. Parameters ---------- func : Block method *args **kwargs Returns ------- List[Block]", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "FunctionDef name:split_and_operate arg:self arg:func arguments arg arg arg arg BoolOp Compare Compare Assign For Call Assign Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_metadata_routing", + "source_code": "def get_metadata_routing(self):\n router = MetadataRouter(owner=self.__class__.__name__).add(estimator=self.estimator, method_mapping=MethodMapping().add(caller='fit', callee='fit'))\n return router", + "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.4 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\multiclass.py", + "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "Griewank", + "source_code": "class Griewank(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n self.custom_bounds = [(-50, 50), (-50, 50)]\n self.global_optimum = [[0 for _ in range(self.N)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n i = arange(1.0, np.size(x) + 1.0)\n return sum(x ** 2 / 4000) - prod(cos(x / sqrt(i))) + 1", + "docstring": "Griewank objective function. This class defines the Griewank global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Griewank}}(x) = \\frac{1}{4000}\\sum_{i=1}^n x_i^2 - \\prod_{i=1}^n\\cos\\left(\\frac{x_i}{\\sqrt{i}}\\right) + 1 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_G.py", + "ast_data": "ClassDef name:Griewank Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_get_assets_dir", + "source_code": "def _get_assets_dir(export_dir):\n return os.path.join(compat.as_text(export_dir), compat.as_text(constants.ASSETS_DIRECTORY))", + "docstring": "Return path to asset directory in the SavedModel.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model_experimental.py", + "ast_data": "FunctionDef name:_get_assets_dir arg:export_dir arguments arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "set_other_cuda_vars", + "source_code": "def set_other_cuda_vars(environ_cp):\n if environ_cp.get('TF_CUDA_CLANG') == '1':\n write_to_bazelrc('build --config=cuda_clang')\n else:\n write_to_bazelrc('build --config=cuda')", + "docstring": "Set other CUDA related variables.", + "type": "function", + "file_path": "tensorflow\\configure.py", + "ast_data": "FunctionDef name:set_other_cuda_vars arg:environ_cp arguments arg If Compare Call Call Call" + }, + { + "library": "scipy", + "name": "Mishra01", + "source_code": "class Mishra01(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([0.0] * self.N, [1.0 + 1e-09] * self.N))\n self.global_optimum = [[1.0 for _ in range(self.N)]]\n self.fglob = 2.0\n\n def fun(self, x, *args):\n self.nfev += 1\n xn = self.N - sum(x[0:-1])\n return (1 + xn) ** xn", + "docstring": "Mishra 1 objective function. This class defines the Mishra 1 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Mishra01}}(x) = (1 + x_n)^{x_n} where .. math:: x_n = n - \\sum_{i=1}^{n-1} x_i with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py", + "ast_data": "ClassDef name:Mishra01 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "power", + "source_code": "def power(self, n, dtype=None):\n if not isscalarlike(n):\n raise NotImplementedError('input is not scalar')\n if not n:\n raise NotImplementedError('zero power is not supported as it would densify the matrix.\\nUse `np.ones(A.shape, dtype=A.dtype)` for this case.')\n data = self._deduped_data()\n if dtype is not None:\n data = data.astype(dtype, copy=False)\n return self._with_data(data ** n)", + "docstring": "This function performs element-wise power. Parameters ---------- n : scalar n is a non-zero scalar (nonzero avoids dense ones creation) If zero power is desired, special case it to use dtype : If dtype is not specified, the current dtype will be preserved. 
Raises ------ NotImplementedError : if n is a zero scalar If zero power is desired, special case it to use ``", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_data.py", + "ast_data": "FunctionDef name:power arg:self arg:n arg:dtype arguments arg arg arg If Call Raise Call If Raise Call Assign Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_irfft", + "source_code": "def _irfft(input_tensor, fft_length=None, name=None):\n with _ops.name_scope(name, default_name, [input_tensor, fft_length]) as name:\n input_tensor = _ops.convert_to_tensor(input_tensor, preferred_dtype=_dtypes.complex64)\n input_tensor.shape.with_rank_at_least(fft_rank)\n if input_tensor.dtype not in (_dtypes.complex64, _dtypes.complex128):\n raise ValueError('IRFFT requires tf.complex64 or tf.complex128 inputs, got: %s' % input_tensor)\n complex_dtype = input_tensor.dtype\n real_dtype = complex_dtype.real_dtype\n if fft_length is None:\n fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank)\n else:\n fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)\n input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=True)\n fft_length_static = _tensor_util.constant_value(fft_length)\n if fft_length_static is not None:\n fft_length = fft_length_static\n return ifft_fn(input_tensor, fft_length, Treal=real_dtype, name=name)", + "docstring": "Wrapper irfft* that infers fft_length argument.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py", + "ast_data": "FunctionDef name:_irfft arg:input_tensor arg:fft_length arg:name arguments arg arg arg With Call Assign Call Call If Compare Raise Call Assign Assign If Compare Assign Call Assign Call Assign Call Assign Call If Compare Assign Return return:yes Call" + }, + { + "library": "seaborn", + "name": "_invert_scale", + "source_code": "def _invert_scale(self, ax, data, vars=('x', 'y')):\n for var in vars:\n _, inv = _get_transform_functions(ax, var[0])\n if var == self.orient and 'width' in data:\n hw = data['width'] / 2\n data['edge'] = inv(data[var] - hw)\n data['width'] = inv(data[var] + hw) - data['edge'].to_numpy()\n for suf in ['', 'min', 'max']:\n if (col := f'{var}{suf}') in data:\n data[col] = inv(data[col])", + "docstring": "Undo scaling after computation so data are plotted correctly.", + "type": "method", + "file_path": "seaborn\\seaborn\\categorical.py", + "ast_data": "FunctionDef name:_invert_scale arg:self arg:ax arg:data arg:vars arguments arg arg arg arg For Assign Call If BoolOp Compare Compare Assign Assign Call Assign Call Call For If Compare Assign Call" + }, + { + "library": "tensorflow", + "name": "build_tensor_info", + "source_code": "@tf_export(v1=['saved_model.build_tensor_info', 'saved_model.utils.build_tensor_info'])\n@deprecation.deprecated(None, _DEPRECATION_MSG)\ndef build_tensor_info(tensor):\n if context.executing_eagerly():\n raise RuntimeError('`build_tensor_info` is not supported in eager execution.')\n return build_tensor_info_internal(tensor)", + "docstring": "Utility function to build TensorInfo proto from a Tensor. Args: tensor: Tensor or SparseTensor whose name, dtype and shape are used to build the TensorInfo. For SparseTensors, the names of the three constituent Tensors are used. Returns: A TensorInfo protocol buffer constructed based on the supplied argument. Raises: RuntimeError: If eager execution is enabled. 
@compatibility(TF2) This API is not compatible with eager execution as needs to be a graph tensor, and there is no replacement for it in TensorFlow 2.x. To start writing programs using TensorFlow 2.x, please refer to the [Effective TensorFlow 2]( guide. @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\utils_impl.py", + "ast_data": "FunctionDef name:build_tensor_info arg:tensor arguments arg If Call Raise Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "get_indexof", + "source_code": "def get_indexof(insts):\n indexof = {}\n for i, inst in enumerate(insts):\n assert inst not in indexof\n indexof[inst] = i\n return indexof", + "docstring": "Get a mapping from instruction memory address to index in instruction list. Additionally checks that each instruction only appears once in the list.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_analysis.py", + "ast_data": "FunctionDef name:get_indexof arg:insts arguments arg Assign For Call Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "assert_non_positive_v2", + "source_code": "@tf_export('debugging.assert_non_positive', v1=[])\n@dispatch.add_dispatch_support\ndef assert_non_positive_v2(x, message=None, summarize=None, name=None):\n return assert_non_positive(x=x, summarize=summarize, message=message, name=name)", + "docstring": "Assert the condition holds element-wise. This Op checks that holds for every element of . If is empty, this is trivially satisfied. If is not <= 0 everywhere, , as well as the first entries of are printed, and is raised. Args: x: Numeric . message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to \"assert_non_positive\". Returns: Op raising unless is all non-positive. This can be used with inside of s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and is False. The check can be performed immediately during eager execution or if is statically known.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py", + "ast_data": "FunctionDef name:assert_non_positive_v2 arg:x arg:message arg:summarize arg:name arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "rad2deg", + "source_code": "def rad2deg(tensor: Tensor) -> Tensor:\n if not isinstance(tensor, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(tensor)}')\n return 180.0 * tensor / pi.to(tensor.device).type(tensor.dtype)", + "docstring": "Convert angles from radians to degrees. Args: tensor: Tensor of arbitrary shape. Returns: Tensor with same shape as input. Example: >>> input = tensor(3.1415926535) >>> rad2deg(input) tensor(180.)", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:rad2deg arg:tensor arguments arg If Call Raise Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "softmax_v2", + "source_code": "@tf_export('nn.softmax', 'math.softmax', v1=[])\n@dispatch.add_dispatch_support\ndef softmax_v2(logits, axis=None, name=None):\n if axis is None:\n axis = -1\n return _wrap_2d_function(logits, gen_nn_ops.softmax, axis, name)", + "docstring": "Computes softmax activations. 
Used for multi-class predictions. The sum of all outputs generated by softmax is 1. This function performs the equivalent of Example usage: >>> softmax = tf.nn.softmax([-1, 0., 1.]) >>> softmax >>> sum(softmax) Args: logits: A non-empty . Must be one of the following types: , , . axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). Returns: A . Has the same type and shape as . Raises: InvalidArgumentError: if is empty or is beyond the last dimension of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:softmax_v2 arg:logits arg:axis arg:name arguments arg arg arg If Compare Assign Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "add_subfigure", + "source_code": "def add_subfigure(self, subplotspec, **kwargs):\n sf = SubFigure(self, subplotspec, **kwargs)\n self.subfigs += [sf]\n sf._remove_method = self.subfigs.remove\n sf.stale_callback = _stale_figure_callback\n self.stale = True\n return sf", + "docstring": "Add a to the figure as part of a subplot arrangement. Parameters ---------- subplotspec : Defines the region in a parent gridspec where the subfigure will be placed. Returns ------- Other Parameters ---------------- **kwargs Are passed to the object. See Also -------- .Figure.subfigures", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:add_subfigure arg:self arg:subplotspec arguments arg arg arg Assign Call Assign Assign Assign Return return:yes" + }, + { + "library": "django", + "name": "unit_attname", + "source_code": "@classmethod\ndef unit_attname(cls, unit_str):\n lower = unit_str.lower()\n if unit_str in cls.UNITS:\n return unit_str\n elif lower in cls.UNITS:\n return lower\n elif lower in cls.LALIAS:\n return cls.LALIAS[lower]\n else:\n raise AttributeError(f'Unknown unit type: {unit_str}')", + "docstring": "Retrieve the unit attribute name for the given unit string. For example, if the given unit string is 'metre', return 'm'. 
Raise an AttributeError if an attribute cannot be found.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\measure.py", + "ast_data": "FunctionDef name:unit_attname arg:cls arg:unit_str arguments arg arg Assign Call If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "dispatch_torch_function", + "source_code": "def dispatch_torch_function(tx: 'InstructionTranslator', fn, args, kwargs):\n all_args = _get_all_args(args, kwargs)\n overloaded_args = _get_overloaded_args([arg for arg in all_args if has_torch_function(arg)], _get_subclass_type)\n types = TupleVariable([_get_subclass_type_var(tx, arg) for arg in overloaded_args])\n if tx.symbolic_torch_function_state.in_torch_function_mode():\n res = tx.symbolic_torch_function_state.call_torch_function_mode(tx, fn, types, args, kwargs)\n if not (isinstance(res, ConstantVariable) and res.value is NotImplemented):\n return res\n for arg in overloaded_args:\n res = arg.call_torch_function(tx, fn, types, args, kwargs)\n if not (isinstance(res, ConstantVariable) and res.value is NotImplemented):\n return res\n unimplemented_v2(gb_type='TypeError from user code', context=f'fn={fn!r}, args={args!r}, kwargs={kwargs!r}', explanation=f'All __torch_function__ overrides for for function {fn} returned NotImplemented', hints=[*graph_break_hints.USER_ERROR])", + "docstring": "Gathers all args that are TensorWithTFOverrideVariable and dispatches based on the ordering in _get_overloaded_args", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\variables\\torch_function.py", + "ast_data": "FunctionDef name:dispatch_torch_function arg:tx arg:fn arg:args arg:kwargs arguments arg arg arg arg Assign Call Assign Call Call Assign Call Call If Call Assign Call If BoolOp Call Compare Return return:yes For Assign Call If BoolOp Call Compare Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "write", + "source_code": "@tf_should_use.should_use_result(warn_in_eager=True)\ndef write(self, index, value, name=None):\n return self._implementation.write(index, value, name=name)", + "docstring": "Write into index of the TensorArray. Args: index: 0-D. int32 scalar with the index to write to. value: N-D. Tensor of type . The Tensor to write to this index. name: A name for the operation (optional). Returns: A new TensorArray object with flow that ensures the write occurs. Use this object for all subsequent operations. 
Raises: ValueError: if there are more writers than specified.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:write arg:self arg:index arg:value arg:name arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "BorgTerminationConfig", + "source_code": "class BorgTerminationConfig(TerminationConfig):\n\n def __init__(self, termination_watcher_fn=None, exit_fn=None, grace_period=None, save_fn=None):\n self.termination_watcher_fn = termination_watcher_fn\n default_exit_fn = lambda: sys.exit(42)\n self.exit_fn = exit_fn or default_exit_fn\n self.grace_period = grace_period or 0\n self.save_fn = save_fn", + "docstring": "Configurations for Borg.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py", + "ast_data": "ClassDef name:BorgTerminationConfig FunctionDef name:__init__ arg:self arg:termination_watcher_fn arg:exit_fn arg:grace_period arg:save_fn arguments arg arg arg arg arg Assign Assign arguments Call Assign BoolOp Assign BoolOp Assign" + }, + { + "library": "pytorch", + "name": "matmul", + "source_code": "def matmul(self, decomposed_weight, activation):\n rows1 = activation.size(dim=0)\n rows2 = decomposed_weight.shape[0]\n cols2 = decomposed_weight.shape[1]\n result = torch.zeros(rows1, cols2)\n for i in range(rows1):\n for j in range(cols2):\n for k in range(rows2):\n weight_val = decomposed_weight[k][j]\n r = int(activation[i][k])\n product = self.bitshift_mul(weight_val, r)\n result[i][j] += product\n return result", + "docstring": "Perform matrix multiplication between decomposed_weight and activation by calling bitshift_mul function for each value Args: decomposed_weight (Tensor): APoT quantized weight decomposed into binary activation (Tensor): uniformly quantized activation", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\experimental\\linear.py", + "ast_data": "FunctionDef name:matmul arg:self arg:decomposed_weight arg:activation arguments arg arg arg Assign Call Assign Assign Assign Call For Call For Call For Call Assign Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "init_step_count", + "source_code": "@classmethod\ndef init_step_count(cls, requester: str):\n cls._step_dict[requester] = cls._current_step", + "docstring": "Initialize for a given requester.", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\profiler.py", + "ast_data": "FunctionDef name:init_step_count arg:cls arg:requester arguments arg arg Assign" + }, + { + "library": "scipy", + "name": "Beale", + "source_code": "class Beale(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-4.5] * self.N, [4.5] * self.N))\n self.global_optimum = [[3.0, 0.5]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n return (1.5 - x[0] + x[0] * x[1]) ** 2 + (2.25 - x[0] + x[0] * x[1] ** 2) ** 2 + (2.625 - x[0] + x[0] * x[1] ** 3) ** 2", + "docstring": "Beale objective function. The Beale [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Beale}}(x) = \\left(x_1 x_2 - x_1 + 1.5\\right)^{2} + \\left(x_1 x_2^{2} - x_1 + 2.25\\right)^{2} + \\left(x_1 x_2^{3} - x_1 + 2.625\\right)^{2} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. 
A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py", + "ast_data": "ClassDef name:Beale FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "indicate_inset_zoom", + "source_code": "def indicate_inset_zoom(self, inset_ax, **kwargs):\n return self.indicate_inset(None, inset_ax, **kwargs)", + "docstring": "Add an inset indicator rectangle to the Axes based on the axis limits for an *inset_ax* and draw connectors between *inset_ax* and the rectangle. Warnings -------- This method is experimental as of 3.0, and the API may change. Parameters ---------- inset_ax : Inset Axes to draw connecting lines to. Two lines are drawn connecting the indicator box to the inset Axes on corners chosen so as to not overlap with the indicator box. **kwargs Other keyword arguments are passed on to Returns ------- inset_indicator : An artist which contains inset_indicator.rectangle : The indicator frame. inset_indicator.connectors : 4-tuple of The four connector lines connecting to (lower_left, upper_left, lower_right upper_right) corners of *inset_ax*. Two lines are set with visibility to *False*, but the user can set the visibility to True if the automatic choice is not deemed correct. .. versionchanged:: 3.10 Previously the rectangle and connectors tuple were returned.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", + "ast_data": "FunctionDef name:indicate_inset_zoom arg:self arg:inset_ax arguments arg arg arg Return return:yes Call" + }, + { + "library": "seaborn", + "name": "scatterplot", + "source_code": "def scatterplot(self, ax, kws):\n line_markers = ['1', '2', '3', '4', '+', 'x', '|', '_']\n if self.x_estimator is None:\n if 'marker' in kws and kws['marker'] in line_markers:\n lw = mpl.rcParams['lines.linewidth']\n else:\n lw = mpl.rcParams['lines.markeredgewidth']\n kws.setdefault('linewidths', lw)\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault('alpha', 0.8)\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n ci_kws = {'color': kws['color']}\n if 'alpha' in kws:\n ci_kws['alpha'] = kws['alpha']\n ci_kws['linewidth'] = mpl.rcParams['lines.linewidth'] * 1.75\n kws.setdefault('s', 50)\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)", + "docstring": "Draw the data.", + "type": "method", + "file_path": "seaborn\\seaborn\\regression.py", + "ast_data": "FunctionDef name:scatterplot arg:self arg:ax arg:kws arguments arg arg arg Assign If Compare If BoolOp Compare Compare Assign Assign Call If BoolOp Call Compare Call Assign Call Assign If Compare Assign Assign Call Assign If Compare For Call Call Call" + }, + { + "library": "pytorch", + "name": "get_mutable_args_from_schema", + "source_code": "def get_mutable_args_from_schema(schema: torch.FunctionSchema) -> tuple[list[str], list[torch.Type]]:\n mutable_args_names = [arg.name for arg in schema.arguments if arg.alias_info is not None and arg.alias_info.is_write]\n mutable_args_types = [arg.type for arg in schema.arguments if arg.alias_info is not None and 
arg.alias_info.is_write]\n return (mutable_args_names, mutable_args_types)", + "docstring": "Returns the list of argument names that get mutated according to the schema and their types.", + "type": "function", + "file_path": "pytorch\\torch\\_higher_order_ops\\auto_functionalize.py", + "ast_data": "FunctionDef name:get_mutable_args_from_schema arg:schema arguments arg Assign BoolOp Compare Assign BoolOp Compare Return return:yes" + }, + { + "library": "django", + "name": "clear_select_clause", + "source_code": "def clear_select_clause(self):\n self.select = ()\n self.default_cols = False\n self.select_related = False\n self.set_extra_mask(())\n self.set_annotation_mask(())\n self.selected = None", + "docstring": "Remove all fields from SELECT clause.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\query.py", + "ast_data": "FunctionDef name:clear_select_clause arg:self arguments arg Assign Assign Assign Call Call Assign" + }, + { + "library": "tensorflow", + "name": "AddMetric", + "source_code": "class AddMetric(Layer):\n\n def __init__(self, aggregation=None, metric_name=None, **kwargs):\n super(AddMetric, self).__init__(**kwargs)\n self.aggregation = aggregation\n self.metric_name = metric_name\n\n def call(self, inputs):\n self.add_metric(inputs, aggregation=self.aggregation, name=self.metric_name)\n return inputs\n\n def get_config(self):\n config = super(AddMetric, self).get_config()\n config.update({'aggregation': self.aggregation, 'metric_name': self.metric_name})\n return config", + "docstring": "Adds its inputs as a metric. Attributes: aggregation: 'mean' or None. How the inputs should be aggregated. metric_name: The name to use for this metric.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "ClassDef name:AddMetric FunctionDef name:__init__ arg:self arg:aggregation arg:metric_name arguments arg arg arg arg Call Call Assign Assign FunctionDef name:call arg:self arg:inputs arguments arg arg Call Return return:yes FunctionDef name:get_config arg:self arguments arg Assign Call Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "__getitem__", + "source_code": "def __getitem__(self, indx):\n m = self._mask\n if isinstance(m[indx], ndarray):\n return masked_array(data=self._data[indx], mask=m[indx], fill_value=self._fill_value[indx], hard_mask=self._hardmask)\n if m is not nomask and m[indx]:\n return masked\n return self._data[indx]", + "docstring": "Get the index.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:indx arguments arg arg Assign If Call Return return:yes Call If BoolOp Compare Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "inverse", + "source_code": "def inverse(self, value):\n if not self.scaled():\n raise ValueError('Not invertible until both vmin and vmax are set')\n (vmin,), _ = self.process_value(self.vmin)\n (vmax,), _ = self.process_value(self.vmax)\n if np.iterable(value):\n val = np.ma.asarray(value)\n return vmin + val * (vmax - vmin)\n else:\n return vmin + value * (vmax - vmin)", + "docstring": "Maps the normalized value (i.e., index in the colormap) back to image data value. 
Parameters ---------- value Normalized value.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:inverse arg:self arg:value arguments arg arg If Call Raise Call Assign Call Assign Call If Call Assign Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "perplexity", + "source_code": "def perplexity(self) -> Tensor:\n return torch.exp(self.entropy())", + "docstring": "Returns perplexity of distribution, batched over batch_shape. Returns: Tensor of shape batch_shape.", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\distribution.py", + "ast_data": "FunctionDef name:perplexity arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "decr_version", + "source_code": "def decr_version(self, key, delta=1, version=None):\n return self.incr_version(key, -delta, version)", + "docstring": "Subtract delta from the cache version for the supplied key. Return the new version.", + "type": "method", + "file_path": "django\\django\\core\\cache\\backends\\base.py", + "ast_data": "FunctionDef name:decr_version arg:self arg:key arg:delta arg:version arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "close", + "source_code": "def close(self, cancel_pending_enqueues=False, name=None):\n if name is None:\n name = '%s_Close' % self._name\n if self._queue_ref.dtype == _dtypes.resource:\n return gen_data_flow_ops.queue_close_v2(self._queue_ref, cancel_pending_enqueues=cancel_pending_enqueues, name=name)\n else:\n return gen_data_flow_ops.queue_close(self._queue_ref, cancel_pending_enqueues=cancel_pending_enqueues, name=name)", + "docstring": "Closes this queue. This operation signals that no more elements will be enqueued in the given queue. Subsequent and operations will fail. Subsequent and operations will continue to succeed if sufficient elements remain in the queue. Subsequently, dequeue and dequeue_many operations that would otherwise block waiting for more elements (if close hadn't been called) will now fail immediately. If is , all pending requests will also be canceled. >>> q = tf.queue.FIFOQueue(capacity=3, dtypes=tf.int32) >>> q.is_closed() >>> q.close() >>> q.is_closed() Args: cancel_pending_enqueues: (Optional.) A boolean, defaulting to (described above). name: A name for the operation (optional). Returns: The operation that closes the queue.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", + "ast_data": "FunctionDef name:close arg:self arg:cancel_pending_enqueues arg:name arguments arg arg arg If Compare Assign If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_scaled_loss", + "source_code": "def get_scaled_loss(self, loss):\n if callable(loss):\n\n def new_loss():\n loss_val = loss()\n return loss_val * math_ops.cast(self.loss_scale, loss_val.dtype)\n return new_loss\n else:\n return loss * math_ops.cast(self.loss_scale, loss.dtype)", + "docstring": "Scales the loss by the loss scale. This method is only needed if you compute gradients manually, e.g. with . In that case, call this method to scale the loss before passing the loss to . If you use or , loss scaling is automatically applied and this method is unneeded. If this method is called, should also be called. See the doc for an example. Args: loss: The loss, which will be multiplied by the loss scale. 
Can either be a tensor or a callable returning a tensor. Returns: multiplied by .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:get_scaled_loss arg:self arg:loss arguments arg arg If Call FunctionDef name:new_loss arguments Assign Call Return return:yes Call Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_scan", + "source_code": "def _scan(elems):\n num_elems = elems[0].shape[dim]\n if num_elems < 2:\n return elems\n reduced_elems = call_operator(*[aten.slice(elem, dim, 0, -1, 2) for elem in elems], *[aten.slice(elem, dim, 1, None, 2) for elem in elems], *additional_inputs)\n odd_elems = _scan(reduced_elems)\n if num_elems % 2 == 0:\n even_elems = call_operator(*[aten.slice(e, dim, 0, -1) for e in odd_elems], *[aten.slice(e, dim, 2, None, 2) for e in elems], *additional_inputs)\n else:\n even_elems = call_operator(*odd_elems, *[aten.slice(e, dim, 2, None, 2) for e in elems], *additional_inputs)\n even_elems = [torch.cat([aten.slice(elem, dim, 0, 1), result], dim=dim) if result.shape.numel() > 0 and elem.shape[dim] > 0 else result if result.shape.numel() > 0 else aten.slice(elem, dim, 0, 1) for elem, result in zip(elems, even_elems)]\n return list(safe_map(functools.partial(_interleave, dim=dim), even_elems, odd_elems))", + "docstring": "Perform the actual recursive scan on ``.", + "type": "function", + "file_path": "pytorch\\torch\\_higher_order_ops\\associative_scan.py", + "ast_data": "FunctionDef name:_scan arg:elems arguments arg Assign If Compare Return return:yes Assign Call Call Call Assign Call If Compare Assign Call Call Call Assign Call Call Assign BoolOp Compare Call Compare Call Call Compare Call Call Call Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "default_device", + "source_code": "def default_device(self):\n return 'cpu'", + "docstring": "The default device used for new NumPy arrays. For NumPy, this always returns ``. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- device : str The default device used for new NumPy arrays. 
Examples -------- >>> info = np.__array_namespace_info__() >>> info.default_device() 'cpu'", + "type": "method", + "file_path": "numpy\\numpy\\_array_api_info.py", + "ast_data": "FunctionDef name:default_device arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_dense_tensor_internal", + "source_code": "def _get_dense_tensor_internal(self, transformation_cache, state_manager):\n with ops.name_scope(None, default_name=self.name):\n sparse_tensors = self.categorical_column.get_sparse_tensors(transformation_cache, state_manager)\n sparse_ids = sparse_tensors.id_tensor\n sparse_weights = sparse_tensors.weight_tensor\n embedding_weights = self.shared_embedding_column_creator.embedding_weights\n sparse_id_rank = tensor_shape.dimension_value(sparse_ids.dense_shape.get_shape()[0])\n embedding_lookup_sparse = embedding_ops.safe_embedding_lookup_sparse\n if not self.use_safe_embedding_lookup and sparse_id_rank is not None and (sparse_id_rank <= 2):\n embedding_lookup_sparse = embedding_ops.embedding_lookup_sparse_v2\n return embedding_lookup_sparse(embedding_weights, sparse_ids, sparse_weights, combiner=self.combiner, name='%s_weights' % self.name, max_norm=self.max_norm)", + "docstring": "Private method that follows the signature of _get_dense_tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:_get_dense_tensor_internal arg:self arg:transformation_cache arg:state_manager arguments arg arg arg With Call Assign Call Assign Assign Assign Assign Call Call Assign If BoolOp Compare Compare Assign Return return:yes Call" + }, + { + "library": "django", + "name": "supports_explaining_query_execution", + "source_code": "@cached_property\ndef supports_explaining_query_execution(self):\n return self.connection.ops.explain_prefix is not None", + "docstring": "Does this backend support explaining query execution?", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\features.py", + "ast_data": "FunctionDef name:supports_explaining_query_execution arg:self arguments arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "_distribute_strategy", + "source_code": "@property\ndef _distribute_strategy(self):\n return None", + "docstring": "The that this variable was created under.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:_distribute_strategy arg:self arguments arg Return return:no" + }, + { + "library": "authlib", + "name": "validate_no_multiple_request_parameter", + "source_code": "@staticmethod\ndef validate_no_multiple_request_parameter(request: OAuth2Request):\n datalist = request.payload.datalist\n parameters = ['response_type', 'client_id', 'redirect_uri', 'scope', 'state']\n for param in parameters:\n if len(datalist.get(param, [])) > 1:\n raise InvalidRequestError(f\"Multiple '{param}' in request.\", state=request.payload.state)", + "docstring": "For the Authorization Endpoint, request and response parameters MUST NOT be included more than once. Per _. .. 
_:", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\base.py", + "ast_data": "FunctionDef name:validate_no_multiple_request_parameter arg:request arguments arg Assign Assign For If Compare Call Call Raise Call" + }, + { + "library": "pytorch", + "name": "set_epoch", + "source_code": "def set_epoch(self, epoch: int) -> None:\n self.epoch = epoch", + "docstring": "Set the epoch for this sampler. When :attr:, this ensures all replicas use a different random ordering for each epoch. Otherwise, the next iteration of this sampler will yield the same ordering. Args: epoch (int): Epoch number.", + "type": "method", + "file_path": "pytorch\\torch\\utils\\data\\distributed.py", + "ast_data": "FunctionDef name:set_epoch arg:self arg:epoch arguments arg arg Assign" + }, + { + "library": "scipy", + "name": "hdmedian", + "source_code": "def hdmedian(data, axis=-1, var=False):\n result = hdquantiles(data, [0.5], axis=axis, var=var)\n return result.squeeze()", + "docstring": "Returns the Harrell-Davis estimate of the median along the given axis. Parameters ---------- data : ndarray Data array. axis : int, optional Axis along which to compute the quantiles. If None, use a flattened array. var : bool, optional Whether to return the variance of the estimate. Returns ------- hdmedian : MaskedArray The median values. If ``, the variance is returned inside the masked array. E.g. for a 1-D array the shape change from (1,) to (2,).", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_extras.py", + "ast_data": "FunctionDef name:hdmedian arg:data arg:axis arg:var arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "signature_from_str", + "source_code": "def signature_from_str(signature: str) -> Signature:\n code = 'def func' + signature + ': pass'\n module = ast.parse(code)\n function = typing.cast('ast.FunctionDef', module.body[0])\n return signature_from_ast(function, code)", + "docstring": "Create a :class: object from a string.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\inspect.py", + "ast_data": "FunctionDef name:signature_from_str arg:signature arguments arg Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "is_supported_container", + "source_code": "def is_supported_container(self, X):\n pass", + "docstring": "Return True if X is a supported container. Parameters ---------- Xs: container Containers to be checked. Returns ------- is_supported_container : bool True if X is a supported container.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\utils\\_set_output.py", + "ast_data": "FunctionDef name:is_supported_container arg:self arg:X arguments arg arg" + }, + { + "library": "tensorflow", + "name": "get_raw_handle", + "source_code": "def get_raw_handle(self):\n self._auto_gc_enabled = False\n return self._handle", + "docstring": "Return the raw handle of the tensor. Note that the method disables the automatic garbage collection of this persistent tensor. 
The caller is now responsible for managing the life time of the tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py", + "ast_data": "FunctionDef name:get_raw_handle arg:self arguments arg Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_should_convert_dates", + "source_code": "def _should_convert_dates(convert_dates: bool | list[str], keep_default_dates: bool, col: Hashable) -> bool:\n if convert_dates is False:\n return False\n elif not isinstance(convert_dates, bool) and col in set(convert_dates):\n return True\n elif not keep_default_dates:\n return False\n elif not isinstance(col, str):\n return False\n col_lower = col.lower()\n if col_lower.endswith(('_at', '_time')) or col_lower in {'modified', 'date', 'datetime'} or col_lower.startswith('timestamp'):\n return True\n return False", + "docstring": "Return bool whether a DataFrame column should be cast to datetime.", + "type": "function", + "file_path": "pandas\\pandas\\io\\json\\_json.py", + "ast_data": "FunctionDef name:_should_convert_dates arg:convert_dates arg:keep_default_dates arg:col arguments arg arg arg If Compare Return return:yes If BoolOp Call Compare Call Return return:yes If Return return:yes If Call Return return:yes Assign Call If BoolOp Call Compare Call Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "devices", + "source_code": "def devices(self) -> list[_Device]:\n return ['cpu', _DASK_DEVICE]", + "docstring": "The devices supported by Dask. For Dask, this always returns ``. Returns ------- devices : list[Device] The devices supported by Dask. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes Examples -------- >>> info = xp.__array_namespace_info__() >>> info.devices() ['cpu', DASK_DEVICE]", + "type": "method", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_info.py", + "ast_data": "FunctionDef name:devices arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_embedding_table_size", + "source_code": "def get_embedding_table_size(self):\n return (self.categorical_column._num_buckets, self.dimension)", + "docstring": "Returns num_ids and width.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py", + "ast_data": "FunctionDef name:get_embedding_table_size arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "diag_update", + "source_code": "@property\ndef diag_update(self):\n return self._diag_update", + "docstring": "If this operator is , this is the diagonal of .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_low_rank_update.py", + "ast_data": "FunctionDef name:diag_update arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_record_matching_score", + "source_code": "def _record_matching_score(self, inputs: Sequence[fx_type_utils.TensorLike | str | int | float | bool | list | complex | None], attributes: dict[str, fx_type_utils.Argument]):\n self._matching_score = 0\n for schema_input, torch_input in zip(self.op_schema.inputs, inputs):\n torch_input_compatible_types = _find_onnx_data_type(torch_input)\n allowed_types = self.type_constraints[schema_input.type_str]\n if allowed_types.intersection(torch_input_compatible_types):\n self._matching_score += 1\n for 
attribute_name, attribute_proto in self.attributes.items():\n attribute = attributes[attribute_name]\n attribute_onnx_type = fx_type_utils.from_python_type_to_onnx_attribute_type(type(attribute))\n if attribute_onnx_type != attribute_proto.type:\n self._matching_score -= 1", + "docstring": "Calculate the inputs matching score of the OpSchema requirements to find the nearest match. Only the functions which have the same number of inputs and attributes as the OpSchema are eligible to be a nearest match candidate. Thus, we don't need to check the length of inputs and attributes here, and only check the types of inputs and attributes. How the matchsing score is calculated: score += 1 if one input/attribute type is in the type constraints. Limitations: None/NoeType/[] could result in zero matches, and the same score of overloads. Args: inputs: The input arguments. attributes: The input keyword arguments. Returns: True if the inputs match the requirements, False otherwise.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py", + "ast_data": "FunctionDef name:_record_matching_score arg:self arg:inputs arg:attributes arguments arg arg arg Assign For Call Assign Call Assign If Call For Call Assign Assign Call Call If Compare" + }, + { + "library": "scipy", + "name": "_design_notch_peak_filter", + "source_code": "def _design_notch_peak_filter(w0, Q, ftype, fs=2.0):\n fs = _validate_fs(fs, allow_none=False)\n w0 = float(w0)\n Q = float(Q)\n w0 = 2 * w0 / fs\n if w0 > 1.0 or w0 < 0.0:\n raise ValueError('w0 should be such that 0 < w0 < 1')\n bw = w0 / Q\n bw = bw * np.pi\n w0 = w0 * np.pi\n if ftype not in ('notch', 'peak'):\n raise ValueError('Unknown ftype.')\n beta = np.tan(bw / 2.0)\n gain = 1.0 / (1.0 + beta)\n if ftype == 'notch':\n b = gain * np.array([1.0, -2.0 * np.cos(w0), 1.0])\n else:\n b = (1.0 - gain) * np.array([1.0, 0.0, -1.0])\n a = np.array([1.0, -2.0 * gain * np.cos(w0), 2.0 * gain - 1.0])\n return (b, a)", + "docstring": "Design notch or peak digital filter. Parameters ---------- w0 : float Normalized frequency to remove from a signal. If is specified, this is in the same units as . By default, it is a normalized scalar that must satisfy ``) polynomials of the IIR filter.", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_filter_design.py", + "ast_data": "FunctionDef name:_design_notch_peak_filter arg:w0 arg:Q arg:ftype arg:fs arguments arg arg arg arg Assign Call Assign Call Assign Call Assign If BoolOp Compare Compare Raise Call Assign Assign Assign If Compare Raise Call Assign Call Assign If Compare Assign Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "load_pluggable_device_library", + "source_code": "def load_pluggable_device_library(library_location):\n if os.path.exists(library_location):\n if os.path.isdir(library_location):\n directory_contents = os.listdir(library_location)\n pluggable_device_libraries = [os.path.join(library_location, f) for f in directory_contents if _is_shared_object(f)]\n else:\n pluggable_device_libraries = [library_location]\n for lib in pluggable_device_libraries:\n py_tf.TF_LoadPluggableDeviceLibrary(lib)\n context.context().reinitialize_physical_devices()\n else:\n raise OSError(errno.ENOENT, 'The file or folder to load pluggable device libraries from does not exist.', library_location)", + "docstring": "Loads a TensorFlow PluggableDevice plugin. \"library_location\" can be a path to a specific shared object, or a folder. 
If it is a folder, all shared objects will be loaded. when the library is loaded, devices/kernels registered in the library via StreamExecutor C API and Kernel/Op Registration C API are made available in TensorFlow process. Args: library_location: Path to the plugin or folder of plugins. Relative or absolute filesystem path to a dynamic library file or folder. Raises: OSError: When the file to be loaded is not found. RuntimeError: when unable to load the library.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\load_library.py", + "ast_data": "FunctionDef name:load_pluggable_device_library arg:library_location arguments arg If Call If Call Assign Call Assign Call Call Assign For Call Call Call Raise Call" + }, + { + "library": "pytorch", + "name": "_GreaterThan", + "source_code": "class _GreaterThan(Constraint):\n\n def __init__(self, lower_bound):\n self.lower_bound = lower_bound\n super().__init__()\n\n def check(self, value):\n return self.lower_bound < value\n\n def __repr__(self):\n fmt_string = self.__class__.__name__[1:]\n fmt_string += f'(lower_bound={self.lower_bound})'\n return fmt_string", + "docstring": "Constrain to a real half line .", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\constraints.py", + "ast_data": "ClassDef name:_GreaterThan FunctionDef name:__init__ arg:self arg:lower_bound arguments arg arg Assign Call Call FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Compare FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes" + }, + { + "library": "scipy", + "name": "Rosenbrock", + "source_code": "class Rosenbrock(Benchmark):\n change_dimensionality = True\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-30.0] * self.N, [30.0] * self.N))\n self.custom_bounds = [(-2, 2), (-2, 2)]\n self.global_optimum = [[1 for _ in range(self.N)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n return rosen(x)", + "docstring": "Rosenbrock objective function. This class defines the Rosenbrock [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Rosenbrock}}(x) = \\sum_{i=1}^{n-1} [100(x_i^2 - x_{i+1})^2 + (x_i - 1)^2] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_R.py", + "ast_data": "ClassDef name:Rosenbrock Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "BaseYearArchiveView", + "source_code": "class BaseYearArchiveView(YearMixin, BaseDateListView):\n date_list_period = 'month'\n make_object_list = False\n\n def get_dated_items(self):\n year = self.get_year()\n date_field = self.get_date_field()\n date = _date_from_string(year, self.get_year_format())\n since = self._make_date_lookup_arg(date)\n until = self._make_date_lookup_arg(self._get_next_year(date))\n lookup_kwargs = {'%s__gte' % date_field: since, '%s__lt' % date_field: until}\n qs = self.get_dated_queryset(**lookup_kwargs)\n date_list = self.get_date_list(qs)\n if not self.get_make_object_list():\n qs = qs.none()\n return (date_list, qs, {'year': date, 'next_year': self.get_next_year(date), 'previous_year': self.get_previous_year(date)})\n\n def get_make_object_list(self):\n return self.make_object_list", + "docstring": "Base view for a list of objects published in a given year. This requires subclassing to provide a response mixin.", + "type": "class", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "ClassDef name:BaseYearArchiveView Assign Assign FunctionDef name:get_dated_items arg:self arguments arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Assign Assign Call Assign Call If Call Assign Call Return return:yes Call Call FunctionDef name:get_make_object_list arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "track_usage", + "source_code": "def track_usage(tool_id, tags):\n del tool_id, tags", + "docstring": "No usage tracking for external library. Args: tool_id: A string identifier for tool to be tracked. tags: list of string tags that will be added to the tracking.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\platform\\analytics.py", + "ast_data": "FunctionDef name:track_usage arg:tool_id arg:tags arguments arg arg" + }, + { + "library": "matplotlib", + "name": "_ensure_locator_exists", + "source_code": "def _ensure_locator_exists(self, N):\n if self.locator is None:\n if self.logscale:\n self.locator = ticker.LogLocator(numticks=N)\n else:\n if N is None:\n N = 7\n self.locator = ticker.MaxNLocator(N + 1, min_n_ticks=1)", + "docstring": "Set a locator on this ContourSet if it's not already set. Parameters ---------- N : int or None If *N* is an int, it is used as the target number of levels. 
Otherwise when *N* is None, a reasonable default is chosen; for logscales the LogLocator chooses, N=7 is the default otherwise.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\contour.py", + "ast_data": "FunctionDef name:_ensure_locator_exists arg:self arg:N arguments arg arg If Compare If Assign Call If Compare Assign Assign Call" + }, + { + "library": "seaborn", + "name": "get_mapping", + "source_code": "def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n boolean_scale = isinstance(scale, Boolean)\n order = getattr(scale, 'order', [True, False] if boolean_scale else None)\n levels = categorical_order(data, order)\n values = self._get_values(scale, levels)\n if boolean_scale:\n values = values[::-1]\n\n def mapping(x):\n ixs = np.asarray(np.nan_to_num(x), np.intp)\n return [values[ix] if np.isfinite(x_i) else False for x_i, ix in zip(x, ixs)]\n return mapping", + "docstring": "Return a function that maps each data value to True or False.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\properties.py", + "ast_data": "FunctionDef name:get_mapping arg:self arg:scale arg:data arguments arg arg arg Assign Call Assign Call Assign Call Assign Call If Assign FunctionDef name:mapping arg:x arguments arg Assign Call Call Return return:yes Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "align_labels", + "source_code": "def align_labels(self, axs=None):\n self.align_xlabels(axs=axs)\n self.align_ylabels(axs=axs)", + "docstring": "Align the xlabels and ylabels of subplots with the same subplots row or column (respectively) if label alignment is being done automatically (i.e. the label position is not manually set). Alignment persists for draw events after this is called. Parameters ---------- axs : list of Optional list (or ) of to align the labels. Default is to align all Axes on the figure. 
See Also -------- matplotlib.figure.Figure.align_xlabels matplotlib.figure.Figure.align_ylabels matplotlib.figure.Figure.align_titles Notes ----- This assumes that all Axes in `.GridSpec.SubplotSpec` positions correspond to figure positions.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\figure.py", + "ast_data": "FunctionDef name:align_labels arg:self arg:axs arguments arg arg Call Call" + }, + { + "library": "tensorflow", + "name": "LossFunctionWrapper", + "source_code": "class LossFunctionWrapper(Loss):\n\n def __init__(self, fn, reduction=losses_utils.ReductionV2.AUTO, name=None, **kwargs):\n super().__init__(reduction=reduction, name=name)\n self.fn = fn\n self._fn_kwargs = kwargs\n\n def call(self, y_true, y_pred):\n if tensor_util.is_tf_type(y_pred) and tensor_util.is_tf_type(y_true):\n y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n ag_fn = autograph.tf_convert(self.fn, ag_ctx.control_status_ctx())\n return ag_fn(y_true, y_pred, **self._fn_kwargs)\n\n def get_config(self):\n config = {}\n for k, v in self._fn_kwargs.items():\n config[k] = backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))", + "docstring": "Wraps a loss function in the class.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "ClassDef name:LossFunctionWrapper FunctionDef name:__init__ arg:self arg:fn arg:reduction arg:name arguments arg arg arg arg arg Call Call Assign Assign FunctionDef name:call arg:self arg:y_true arg:y_pred arguments arg arg arg If BoolOp Call Call Assign Call Assign Call Call Return return:yes Call FunctionDef name:get_config arg:self arguments arg Assign For Call Assign Call Call Assign Call Call Return return:yes Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "RemapOperatorType", + "source_code": "def RemapOperatorType(operator_type):\n old_to_new = {'PoolOptions': 'Pool2DOptions', 'DepthwiseConvolutionOptions': 'DepthwiseConv2DOptions', 'ConvolutionOptions': 'Conv2DOptions', 'LocalResponseNormOptions': 'LocalResponseNormalizationOptions', 'BasicRNNOptions': 'RNNOptions'}\n return old_to_new[operator_type] if operator_type in old_to_new else operator_type", + "docstring": "Remap operator structs from old names to new names. Args: operator_type: String representing the builtin operator data type string. (see :schema.fbs). Raises: ValueError: When the model has consistency problems. 
Returns: Upgraded builtin operator data type as a string.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\schema\\upgrade_schema.py", + "ast_data": "FunctionDef name:RemapOperatorType arg:operator_type arguments arg Assign Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "_variable_call", + "source_code": "@classmethod\ndef _variable_call(cls, initial_value=None, trainable=None, validate_shape=True, caching_device=None, name=None, variable_def=None, dtype=None, import_scope=None, constraint=None, synchronization=variables.VariableSynchronization.AUTO, aggregation=variables.VariableAggregation.NONE, shape=None, experimental_enable_variable_lifting=None, expected_shape=None, collections=None, use_resource=None, **kwargs):\n if cls is not VariableV1:\n return None\n previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)\n for _, getter in ops.get_default_graph()._variable_creator_stack:\n previous_getter = variables._make_getter(getter, previous_getter)\n if aggregation is None:\n aggregation = variables.VariableAggregation.NONE\n return previous_getter(initial_value=initial_value, trainable=trainable, validate_shape=validate_shape, caching_device=caching_device, name=name, variable_def=variable_def, dtype=dtype, import_scope=import_scope, constraint=constraint, synchronization=synchronization, aggregation=aggregation, shape=shape, experimental_enable_variable_lifting=experimental_enable_variable_lifting, expected_shape=expected_shape, collections=collections, use_resource=use_resource)", + "docstring": "VariableV1 class getter. Useful to force the signature.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_v1.py", + "ast_data": "FunctionDef name:_variable_call arg:cls arg:initial_value arg:trainable arg:validate_shape arg:caching_device arg:name arg:variable_def arg:dtype arg:import_scope arg:constraint arg:synchronization arg:aggregation arg:shape arg:experimental_enable_variable_lifting arg:expected_shape arg:collections arg:use_resource arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg If Compare Return return:no Assign arguments arg Call For Call Assign Call If Compare Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "theilslopes", + "source_code": "def theilslopes(y, x=None, alpha=0.95, method='separate'):\n y = ma.asarray(y).flatten()\n if x is None:\n x = ma.arange(len(y), dtype=float)\n else:\n x = ma.asarray(x).flatten()\n if len(x) != len(y):\n raise ValueError(f'Incompatible lengths ! ({len(y)}<>{len(x)})')\n m = ma.mask_or(ma.getmask(x), ma.getmask(y))\n y._mask = x._mask = m\n y = y.compressed()\n x = x.compressed().astype(float)\n return stats_theilslopes(y, x, alpha=alpha, method=method)", + "docstring": "Computes the Theil-Sen estimator for a set of points (x, y). implements a method for robust linear regression. It computes the slope as the median of all slopes between paired values. Parameters ---------- y : array_like Dependent variable. x : array_like or None, optional Independent variable. 
If None, use `alphaslopeslopetheilslopesscipy.stats.theilslopes`.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", + "ast_data": "FunctionDef name:theilslopes arg:y arg:x arg:alpha arg:method arguments arg arg arg arg Assign Call Call If Compare Assign Call Call Assign Call Call If Compare Call Call Raise Call Call Call Assign Call Call Call Assign Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "set_module_name_object_type_order", + "source_code": "def set_module_name_object_type_order(self, module_name: str, object_type: Callable, index: int, qconfig_list: list[QConfigAny]) -> QConfigMultiMapping:\n self._insert_qconfig_list('module_name_object_type_order_qconfigs', [module_name, object_type, index], qconfig_list)\n return self", + "docstring": "Set module_name QConfigs see :func: for more info", + "type": "method", + "file_path": "pytorch\\torch\\ao\\ns\\fx\\qconfig_multi_mapping.py", + "ast_data": "FunctionDef name:set_module_name_object_type_order arg:self arg:module_name arg:object_type arg:index arg:qconfig_list arguments arg arg arg arg arg Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_maybe_build_distributed_table", + "source_code": "def _maybe_build_distributed_table(self):\n with self._distributed_table_creation_lock:\n if not self._distributed_table:\n\n def create_copy():\n new_table = self._wrapped_creator()\n with self._has_resource_functions:\n while not hasattr(self, '_restored_function') or any((method not in self._restored_function for method in TRACKABLE_RESOURCE_METHODS)):\n self._has_resource_functions.wait()\n if hasattr(self, '_restored_function'):\n with with_local_resource_restore_context(new_table):\n for name, tf_function in self._restored_function.items():\n setattr(new_table, name, tf_function)\n init_op = new_table._initialize()\n if not context.executing_eagerly():\n ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)\n ret = new_table.resource_handle\n return ret\n self._distributed_table = self._coordinator._create_per_worker_resources(create_copy)", + "docstring": "Create table objects and resources on each worker if hasn't been created.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py", + "ast_data": "FunctionDef name:_maybe_build_distributed_table arg:self arguments arg With If FunctionDef name:create_copy arguments Assign Call With While BoolOp Call Call Compare Call If Call With Call For Call Call Assign Call If Call Call Assign Return return:yes Assign Call" + }, + { + "library": "tensorflow", + "name": "get_calibration_min_max_value", + "source_code": "def get_calibration_min_max_value(self, calibration_statistics_serialized: bytes, calibration_options_serialized: bytes) -> Optional[tuple[float, float]]:\n statistics = calibration_statistics_pb2.CalibrationStatistics.FromString(calibration_statistics_serialized)\n options = stablehlo_quant_config_pb2.CalibrationOptions.FromString(calibration_options_serialized)\n return _call_and_return_none_on_error(functools.partial(calibration_algorithm.get_min_max_value, statistics, options), error_msg=f'Retrieving calibrated min / max failed. Options: {options}.')", + "docstring": "Calculates min and max values from statistics. Args: calibration_statistics_serialized: Serialized . This will be the source to calculate min and max values from. calibration_options_serialized: Serialized . Specifies how the min / max should be calculated. 
Returns: (min_value, max_value): Min and max calculated using calib_opts. upon error.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\py_function_lib.py", + "ast_data": "FunctionDef name:get_calibration_min_max_value arg:self arg:calibration_statistics_serialized arg:calibration_options_serialized arguments arg arg arg Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "batch_set_value", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef batch_set_value(tuples):\n if context.executing_eagerly() or ops.inside_function():\n for x, value in tuples:\n x.assign(numpy_compat.np_asarray(value, dtype=dtype_numpy(x)))\n else:\n with get_graph().as_default():\n if tuples:\n assign_ops = []\n feed_dict = {}\n for x, value in tuples:\n value = numpy_compat.np_asarray(value, dtype=dtype_numpy(x))\n tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])\n if hasattr(x, '_assign_placeholder'):\n assign_placeholder = x._assign_placeholder\n assign_op = x._assign_op\n else:\n placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)\n assign_placeholder = array_ops.placeholder(tf_dtype, shape=placeholder_shape)\n assign_op = x.assign(assign_placeholder)\n x._assign_placeholder = assign_placeholder\n x._assign_op = assign_op\n assign_ops.append(assign_op)\n feed_dict[assign_placeholder] = value\n get_session().run(assign_ops, feed_dict=feed_dict)", + "docstring": "Sets the values of many tensor variables at once. Args: tuples: a list of tuples . should be a Numpy array.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:batch_set_value arg:tuples arguments arg If BoolOp Call Call For Call Call Call With Call Call If Assign Assign For Assign Call Call Assign Call Call If Call Assign Assign Assign Call Assign Call Assign Call Assign Assign Call Assign Call Call" + }, + { + "library": "pandas", + "name": "infer_to_same_shape", + "source_code": "def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame:\n result = self.obj._constructor(data=results)\n result = result.T\n result.index = res_index\n result = result.infer_objects()\n return result", + "docstring": "infer the results to the same shape as the input object", + "type": "method", + "file_path": "pandas\\pandas\\core\\apply.py", + "ast_data": "FunctionDef name:infer_to_same_shape arg:self arg:results arg:res_index arguments arg arg arg Assign Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_chunk_sharding_spec_check", + "source_code": "def _chunk_sharding_spec_check(spec, op):\n if not isinstance(spec, ChunkShardingSpec):\n raise NotImplementedError(f\"Only ChunkShardingSpec supported for '{op.__name__}'.\")", + "docstring": "For the given op implementation check if the sharding spec is ChunkShardingSpec.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\chunk_sharding_spec_ops\\_common.py", + "ast_data": "FunctionDef name:_chunk_sharding_spec_check arg:spec arg:op arguments arg arg If Call Raise Call" + }, + { + "library": "authlib", + "name": "authenticate_client_secret_basic", + "source_code": "def authenticate_client_secret_basic(query_client, request):\n client_id, client_secret = extract_basic_authorization(request.headers)\n if client_id and client_secret:\n client = _validate_client(query_client, 
client_id, 401)\n if client.check_client_secret(client_secret):\n log.debug(f'Authenticate {client_id} via \"client_secret_basic\" success')\n return client\n log.debug(f'Authenticate {client_id} via \"client_secret_basic\" failed')", + "docstring": "Authenticate client by `` method. The client uses HTTP Basic for authentication.", + "type": "function", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authenticate_client.py", + "ast_data": "FunctionDef name:authenticate_client_secret_basic arg:query_client arg:request arguments arg arg Assign Call If BoolOp Assign Call If Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "range_push", + "source_code": "def range_push(msg):\n return _nvtx.rangePushA(msg)", + "docstring": "Push a range onto a stack of nested range span. Returns zero-based depth of the range that is started. Args: msg (str): ASCII message to associate with range", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\nvtx.py", + "ast_data": "FunctionDef name:range_push arg:msg arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "alias", + "source_code": "def alias(self, *args, **kwargs):\n self._not_support_combined_queries('alias')\n return self._annotate(args, kwargs, select=False)", + "docstring": "Return a query set with added aliases for extra data or aggregations.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:alias arg:self arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "django", + "name": "save", + "source_code": "def save(self, name, content, max_length=None):\n if name is None:\n name = content.name\n if not hasattr(content, 'chunks'):\n content = File(content, name)\n validate_file_name(name, allow_relative_path=True)\n name = self.get_available_name(name, max_length=max_length)\n validate_file_name(name, allow_relative_path=True)\n name = self._save(name, content)\n validate_file_name(name, allow_relative_path=True)\n return name", + "docstring": "Save new content to the file specified by name. 
The content should be a proper File object or any Python file-like object, ready to be read from the beginning.", + "type": "method", + "file_path": "django\\django\\core\\files\\storage\\base.py", + "ast_data": "FunctionDef name:save arg:self arg:name arg:content arg:max_length arguments arg arg arg arg If Compare Assign If Call Assign Call Call Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_node_matches_argspec", + "source_code": "def _node_matches_argspec(node, func):\n arg_spec = tf_inspect.getfullargspec(func)\n node_args = tuple((_arg_name(arg) for arg in node.args.args))\n if node_args != tuple(arg_spec.args):\n return False\n if arg_spec.varargs != _arg_name(node.args.vararg):\n return False\n if arg_spec.varkw != _arg_name(node.args.kwarg):\n return False\n node_kwonlyargs = tuple((_arg_name(arg) for arg in node.args.kwonlyargs))\n if node_kwonlyargs != tuple(arg_spec.kwonlyargs):\n return False\n return True", + "docstring": "Returns True is node fits the argspec of func.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\parser.py", + "ast_data": "FunctionDef name:_node_matches_argspec arg:node arg:func arguments arg arg Assign Call Assign Call Call If Compare Call Return return:yes If Compare Call Return return:yes If Compare Call Return return:yes Assign Call Call If Compare Call Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "_zseries_der", + "source_code": "def _zseries_der(zs):\n n = len(zs) // 2\n ns = np.array([-1, 0, 1], dtype=zs.dtype)\n zs *= np.arange(-n, n + 1) * 2\n d, r = _zseries_div(zs, ns)\n return d", + "docstring": "Differentiate a z-series. The derivative is with respect to x, not z. This is achieved using the chain rule and the value of dx/dz given in the module notes. Parameters ---------- zs : z-series The z-series to differentiate. Returns ------- derivative : z-series The derivative Notes ----- The zseries for x (ns) has been multiplied by two in order to avoid using floats that are incompatible with Decimal and likely other specialized scalar types. This scaling has been compensated by multiplying the value of zs by two also so that the two cancels in the division.", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\chebyshev.py", + "ast_data": "FunctionDef name:_zseries_der arg:zs arguments arg Assign Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "composite_images", + "source_code": "def composite_images(images, renderer, magnification=1.0):\n if len(images) == 0:\n return (np.empty((0, 0, 4), dtype=np.uint8), 0, 0)\n parts = []\n bboxes = []\n for image in images:\n data, x, y, trans = image.make_image(renderer, magnification)\n if data is not None:\n x *= magnification\n y *= magnification\n parts.append((data, x, y, image._get_scalar_alpha()))\n bboxes.append(Bbox([[x, y], [x + data.shape[1], y + data.shape[0]]]))\n if len(parts) == 0:\n return (np.empty((0, 0, 4), dtype=np.uint8), 0, 0)\n bbox = Bbox.union(bboxes)\n output = np.zeros((int(bbox.height), int(bbox.width), 4), dtype=np.uint8)\n for data, x, y, alpha in parts:\n trans = Affine2D().translate(x - bbox.x0, y - bbox.y0)\n _image.resample(data, output, trans, _image.NEAREST, resample=False, alpha=alpha)\n return (output, bbox.x0 / magnification, bbox.y0 / magnification)", + "docstring": "Composite a number of RGBA images into one. 
The images are composited in the order in which they appear in the *images* list. Parameters ---------- images : list of Images Each must have a method. For each image, should return , though this is not enforced by this function. Each image must have a purely affine transformation with no shear. renderer : magnification : float, default: 1 The additional magnification to apply for the renderer in use. Returns ------- image : (M, N, 4) array The composited RGBA image. offset_x, offset_y : float The (left, bottom) offset where the composited image should be placed in the output figure.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:composite_images arg:images arg:renderer arg:magnification arguments arg arg arg If Compare Call Return return:yes Call Assign Assign For Assign Call If Compare Call Call Call Call If Compare Call Return return:yes Call Assign Call Assign Call Call Call For Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "event_shape", + "source_code": "@property\ndef event_shape(self) -> torch.Size:\n return self._event_shape", + "docstring": "Returns the shape of a single sample (without batching).", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\distribution.py", + "ast_data": "FunctionDef name:event_shape arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_deferred_pool_runner", + "source_code": "def _deferred_pool_runner(has_chief, num_workers, initializer=None, share_gpu=True):\n container = []\n\n def get_or_create():\n if not container:\n cluster_spec = multi_worker_test_base.create_cluster_spec(has_chief=has_chief, num_workers=num_workers, num_ps=0, has_eval=False)\n runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec, initializer=initializer, share_gpu=share_gpu)\n container.append(runner)\n return container[0]\n return get_or_create", + "docstring": "Returns a callable that returns the pool runner. It creates the pool runner only upon first invocation. This avoids creating it when this file is imported. Args: has_chief: whether there should be a chief. num_workers: the number of workers excluding the chief. initializer: initializer of each process. share_gpu: whether to share GPU between the workers. Returns: A callable that returns the runner.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\strategy_combinations.py", + "ast_data": "FunctionDef name:_deferred_pool_runner arg:has_chief arg:num_workers arg:initializer arg:share_gpu arguments arg arg arg arg Assign FunctionDef name:get_or_create arguments If Assign Call Assign Call Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "exp", + "source_code": "@staticmethod\n@maybe_upcast_float32()\ndef exp(x):\n if config.use_fast_math:\n return f'libdevice.exp2({x} * {TritonOverrides._LOG_2_E})'\n else:\n return f'tl_math.exp({x})'", + "docstring": "When use_fast_math, use the ftz (flushing to zero) variant of exponent computation. 
Check for more details.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py", + "ast_data": "FunctionDef name:exp arg:x arguments arg If Return return:yes Return return:yes Call" + }, + { + "library": "django", + "name": "ContextMixin", + "source_code": "class ContextMixin:\n extra_context = None\n\n def get_context_data(self, **kwargs):\n kwargs.setdefault('view', self)\n if self.extra_context is not None:\n kwargs.update(self.extra_context)\n return kwargs", + "docstring": "A default context mixin that passes the keyword arguments received by get_context_data() as the template context.", + "type": "class", + "file_path": "django\\django\\views\\generic\\base.py", + "ast_data": "ClassDef name:ContextMixin Assign FunctionDef name:get_context_data arg:self arguments arg arg Call If Compare Call Return return:yes" + }, + { + "library": "pytorch", + "name": "ProfilerAction", + "source_code": "class ProfilerAction(Enum):\n NONE = 0\n WARMUP = 1\n RECORD = 2\n RECORD_AND_SAVE = 3", + "docstring": "Profiler actions that can be taken at the specified intervals", + "type": "class", + "file_path": "pytorch\\torch\\profiler\\profiler.py", + "ast_data": "ClassDef name:ProfilerAction Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "gen_tensor_dims", + "source_code": "def gen_tensor_dims(n, curr):\n dims = []\n for _ in range(n):\n dvar, curr = gen_dvar(curr)\n dims.append(dvar)\n return (dims, curr)", + "docstring": "Generate a list of tensor dimensions :param n: the number of dimensions :param curr: the current counter :return: a list of dimension variables and an updated counter", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\util.py", + "ast_data": "FunctionDef name:gen_tensor_dims arg:n arg:curr arguments arg arg Assign For Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "pointwise_or_reduction_read_writes", + "source_code": "def pointwise_or_reduction_read_writes(self, pointwise: bool=True) -> dependencies.ReadWrites:\n keep_sizes, ignore_sizes = self._sizes if pointwise else reversed(self._sizes)\n return dependencies.extract_read_writes(self._body, keep_sizes, hidden_args=[[sympy.S.Zero] * len(ignore_sizes)])", + "docstring": "Get the memory dependencies in either the pointwise or the reduction axes.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:pointwise_or_reduction_read_writes arg:self arg:pointwise arguments arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "XLAOptions", + "source_code": "@tf_export('tpu.XLAOptions')\nclass XLAOptions(collections.namedtuple('XLAOptions', ['use_spmd_for_xla_partitioning', 'enable_xla_dynamic_padder'])):\n\n def __new__(cls, use_spmd_for_xla_partitioning=True, enable_xla_dynamic_padder=True):\n return super(XLAOptions, cls).__new__(cls, use_spmd_for_xla_partitioning, enable_xla_dynamic_padder)", + "docstring": "XLA compilation options. Attributes: use_spmd_for_xla_partitioning: Boolean. Whether to use XLA's SPMD partitioner instead of MPMD partitioner when compiler partitioning is requested. enable_xla_dynamic_padder: Boolean. Whether to enable XLA dynamic padder infrastructure to handle dynamic shapes inputs inside XLA. True by default. Disabling this may cause correctness issues with dynamic shapes inputs, as XLA will just assume the inputs are with padded shapes. 
However users can optionally set it to False to improve device time if masking is already handled in the user side.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py", + "ast_data": "ClassDef name:XLAOptions Call FunctionDef name:__new__ arg:cls arg:use_spmd_for_xla_partitioning arg:enable_xla_dynamic_padder arguments arg arg arg Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "_validate_symmetry", + "source_code": "def _validate_symmetry(symmetry):\n if symmetry is None:\n return 'general'\n symmetry = str(symmetry).lower()\n symmetries = ['general', 'symmetric', 'skew-symmetric', 'hermitian']\n if symmetry not in symmetries:\n raise ValueError('Invalid symmetry. Must be one of: ' + ', '.join(symmetries))\n return symmetry", + "docstring": "Check that the symmetry parameter is one that MatrixMarket allows..", + "type": "function", + "file_path": "scipy\\scipy\\io\\_fast_matrix_market\\__init__.py", + "ast_data": "FunctionDef name:_validate_symmetry arg:symmetry arguments arg If Compare Return return:yes Assign Call Call Assign If Compare Raise Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.interpd\ndef __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):\n if 'transform' in kwargs:\n raise ValueError('transform should not be set')\n kwargs['transform'] = IdentityTransform()\n kwargs.setdefault('fill', bool({'fc', 'facecolor', 'color'}.intersection(kwargs)))\n super().__init__(**kwargs)\n self.bbox1 = bbox1\n self.bbox2 = bbox2\n self.loc1 = loc1\n self.loc2 = loc2", + "docstring": "Connect two bboxes with a straight line. Parameters ---------- bbox1, bbox2 : Bounding boxes to connect. loc1, loc2 : {1, 2, 3, 4} Corner of *bbox1* and *bbox2* to draw the line. Valid values are:: 'upper right' : 1, 'upper left' : 2, 'lower left' : 3, 'lower right' : 4 *loc2* is optional and defaults to *loc1*. **kwargs Patch properties for the line drawn. Valid arguments include: %(Patch:kwdoc)s", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\inset_locator.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:bbox1 arg:bbox2 arg:loc1 arg:loc2 arguments arg arg arg arg arg arg If Compare Raise Call Assign Call Call Call Call Call Call Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "on_train_batch_end", + "source_code": "def on_train_batch_end(self, batch, logs=None):\n if self._should_call_train_batch_hooks:\n self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)", + "docstring": "Calls the methods of its callbacks. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:on_train_batch_end arg:self arg:batch arg:logs arguments arg arg arg If Call" + }, + { + "library": "scikit-learn", + "name": "loads", + "source_code": "def loads(s, encode_nominal=False, return_type=DENSE):\n decoder = ArffDecoder()\n return decoder.decode(s, encode_nominal=encode_nominal, return_type=return_type)", + "docstring": "Convert a string instance containing the ARFF document into a Python object. :param s: a string object. :param encode_nominal: boolean, if True perform a label encoding while reading the .arff file. :param return_type: determines the data structure used to store the dataset. Can be one of , , , or . 
Consult the sections on _ and _. :return: a dictionary.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\_arff.py", + "ast_data": "FunctionDef name:loads arg:s arg:encode_nominal arg:return_type arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "score", + "source_code": "def score(self, X, y, sample_weight=None):\n from .metrics import accuracy_score\n return accuracy_score(y, self.predict(X), sample_weight=sample_weight)", + "docstring": "Return :ref: on provided data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True labels for . sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float Mean accuracy of `y`.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\base.py", + "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "compute_global_tensor_shape", + "source_code": "def compute_global_tensor_shape(shape: torch.Size, mesh: DeviceMesh, placements: Sequence[Placement]) -> torch.Size:\n if len(placements) != 1:\n raise NotImplementedError('compute_global_tensor_shape only supports 1 placement for now.')\n if len(placements) != mesh.ndim:\n raise RuntimeError(f'Expected one placement per mesh dim, but found {len(placements)} placements and {mesh.ndim} mesh dims.')\n if isinstance(placements[0], Replicate):\n return shape\n elif isinstance(placements[0], Shard):\n local_shape = torch.tensor(list(shape))\n gathered_shaped_tensors = [torch.empty_like(local_shape, device=local_shape.device) for _ in range(mesh.size())]\n funcol.all_gather_inplace(gathered_shaped_tensors, local_shape)\n sharded_dim_sum = 0\n shard_dim = placements[0].dim\n other_dims = [d for d in range(mesh.ndim) if d != shard_dim]\n for shape_tensor in gathered_shaped_tensors:\n if not torch.equal(local_shape[other_dims], shape_tensor[other_dims]):\n raise RuntimeError('Non-sharded dimentions should have identical size across ranks.')\n shape_tensor_list = shape_tensor.tolist()\n sharded_dim_sum += shape_tensor_list[shard_dim]\n global_shape = list(shape)\n global_shape[placements[0].dim] = sharded_dim_sum\n return torch.Size(global_shape)\n else:\n raise NotImplementedError(f'Placement type {type(placements[0])} not supported.')", + "docstring": "Compute the global size of a DTensor from the given local tensor shape, the mesh and placements. Different from , which assumes sharding is even, this util allgathers local shards' shapes from all ranks and thus can support uneven sharding. NOTE: Currently this function only supports 1D mesh. Args: shape (:class:): Shape of the local tensor mesh (:class:): Object which describes the mesh topology of devices for the DTensor. placements (Sequence[:class:]]): The attribute of the DTensor that describes its layout on the mesh topology. 
Return: tensor_shape: Shape of the global DTensor.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\_utils.py", + "ast_data": "FunctionDef name:compute_global_tensor_shape arg:shape arg:mesh arg:placements arguments arg arg arg If Compare Call Raise Call If Compare Call Raise Call Call If Call Return return:yes If Call Assign Call Call Assign Call Call Call Call Assign Assign Assign Call Compare For If Call Raise Call Assign Call Assign Call Assign Return return:yes Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "flush", + "source_code": "def flush(self):\n if context.executing_eagerly() and self._closed:\n return\n with ops.device('cpu:0'):\n return gen_summary_ops.flush_summary_writer(self._resource)", + "docstring": "See .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:flush arg:self arguments arg If BoolOp Call Return return:no With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "local_device_locations", + "source_code": "def local_device_locations(self) -> List[Dict[str, int]]:\n mapping = self.unravel_index()\n return [mapping[device_id] for device_id in self.local_device_ids()]", + "docstring": "Returns a list of local device locations. A device location is a dictionary from dimension names to indices on those dimensions.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py", + "ast_data": "FunctionDef name:local_device_locations arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_Constraint", + "source_code": "class _Constraint(ABC):\n\n def __init__(self):\n self.hidden = False\n\n @abstractmethod\n def is_satisfied_by(self, val):\n pass\n\n @abstractmethod\n def __str__(self):\n pass", + "docstring": "Base class for the constraint objects.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py", + "ast_data": "ClassDef name:_Constraint FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg FunctionDef name:__str__ arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "getsourcefile", + "source_code": "def getsourcefile(object):\n return _inspect.getsourcefile(tf_decorator.unwrap(object)[1])", + "docstring": "TFDecorator-aware replacement for inspect.getsourcefile.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py", + "ast_data": "FunctionDef name:getsourcefile arg:object arguments arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "ResolvedOuterRef", + "source_code": "class ResolvedOuterRef(F):\n contains_aggregate = False\n contains_over_clause = False\n\n def as_sql(self, *args, **kwargs):\n raise ValueError('This queryset contains a reference to an outer query and may only be used in a subquery.')\n\n def resolve_expression(self, *args, **kwargs):\n col = super().resolve_expression(*args, **kwargs)\n if col.contains_over_clause:\n raise NotSupportedError(f'Referencing outer query window expression is not supported: {self.name}.')\n col.possibly_multivalued = LOOKUP_SEP in self.name\n return col\n\n def relabeled_clone(self, relabels):\n return self\n\n def get_group_by_cols(self):\n return []", + "docstring": "An object that contains a reference to an outer query. 
In this case, the reference to the outer query has been resolved because the inner query has been used as a subquery.", + "type": "class", + "file_path": "django\\django\\db\\models\\expressions.py", + "ast_data": "ClassDef name:ResolvedOuterRef Assign Assign FunctionDef name:as_sql arg:self arguments arg arg arg Raise Call FunctionDef name:resolve_expression arg:self arguments arg arg arg Assign Call Call If Raise Call Assign Compare Return return:yes FunctionDef name:relabeled_clone arg:self arg:relabels arguments arg arg Return return:yes FunctionDef name:get_group_by_cols arg:self arguments arg Return return:no" + }, + { + "library": "pandas", + "name": "InvalidVersion", + "source_code": "class InvalidVersion(ValueError):\n pass", + "docstring": "An invalid version was found, users should refer to PEP 440. The `` exception is raised when a version string is improperly formatted. Pandas uses this exception to ensure that all version strings are PEP 440 compliant. See Also -------- util.version.Version : Class for handling and parsing version strings. Examples -------- >>> pd.util.version.Version(\"1.\") Traceback (most recent call last): InvalidVersion: Invalid version: '1.'", + "type": "class", + "file_path": "pandas\\pandas\\util\\version\\__init__.py", + "ast_data": "ClassDef name:InvalidVersion" + }, + { + "library": "pytorch", + "name": "post_hook", + "source_code": "def post_hook(self, is_last_joiner: bool) -> None:\n pass", + "docstring": "Call hook after all processes have joined. It is passed an additional `` otherwise.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py", + "ast_data": "FunctionDef name:post_hook arg:self arg:is_last_joiner arguments arg arg" + }, + { + "library": "tensorflow", + "name": "unpack_x_y_sample_weight", + "source_code": "def unpack_x_y_sample_weight(data):\n if not isinstance(data, tuple):\n return (data, None, None)\n elif len(data) == 1:\n return (data[0], None, None)\n elif len(data) == 2:\n return (data[0], data[1], None)\n elif len(data) == 3:\n return (data[0], data[1], data[2])\n else:\n error_msg = 'Data is expected to be in format `x`, `(x,)`, `(x, y)`, or `(x, y, sample_weight)`, found: {}'.format(data)\n raise ValueError(error_msg)", + "docstring": "Unpacks user-provided data tuple. This is a convenience utility to be used when overriding , , or . This utility makes it easy to support data of the form , , or . Standalone usage: >>> features_batch = tf.ones((10, 5)) >>> labels_batch = tf.zeros((10, 5)) >>> data = (features_batch, labels_batch) >>> # and will default to if not provided. >>> x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data) >>> sample_weight is None True Example in overridden : Args: data: A tuple of the form , , or . 
Returns: The unpacked tuple, with s for and if they are not provided.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", + "ast_data": "FunctionDef name:unpack_x_y_sample_weight arg:data arguments arg If Call Return return:yes If Compare Call Return return:yes If Compare Call Return return:yes If Compare Call Return return:yes Assign Call Raise Call" + }, + { + "library": "pytorch", + "name": "_find_cuda_home", + "source_code": "def _find_cuda_home() -> Optional[str]:\n cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')\n if cuda_home is None:\n nvcc_path = shutil.which('nvcc')\n if nvcc_path is not None:\n cuda_home = os.path.dirname(os.path.dirname(nvcc_path))\n else:\n if IS_WINDOWS:\n cuda_homes = glob.glob('C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*')\n if len(cuda_homes) == 0:\n cuda_home = ''\n else:\n cuda_home = cuda_homes[0]\n else:\n cuda_home = '/usr/local/cuda'\n if not os.path.exists(cuda_home):\n cuda_home = None\n if cuda_home and (not torch.cuda.is_available()):\n logger.warning(\"No CUDA runtime is found, using CUDA_HOME='%s'\", cuda_home)\n return cuda_home", + "docstring": "Find the CUDA install path.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\cpp_extension.py", + "ast_data": "FunctionDef name:_find_cuda_home arguments Assign BoolOp Call Call If Compare Assign Call If Compare Assign Call Call If Assign Call If Compare Call Assign Assign Assign If Call Assign If BoolOp Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_reduce_scatter_tensors", + "source_code": "@no_type_check\ndef _get_reduce_scatter_tensors(state: _FSDPState, unsharded_grad: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n chunks = list(unsharded_grad.chunk(state.world_size))\n numel_to_pad = state.world_size * chunks[0].numel() - unsharded_grad.numel()\n padded_unsharded_grad = F.pad(unsharded_grad, [0, numel_to_pad]) if numel_to_pad > 0 else unsharded_grad\n new_sharded_grad = torch.empty_like(chunks[0])\n return (padded_unsharded_grad, new_sharded_grad)", + "docstring": "Returns the input and output tensors to reduce-scatter, respectively.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_get_reduce_scatter_tensors arg:state arg:unsharded_grad arguments arg arg Assign Call Call Assign Call Call Assign Compare Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_copy_weights_to_distributed_model", + "source_code": "def _copy_weights_to_distributed_model(original_model, mode):\n strategy = original_model._distribution_strategy\n distributed_model = get_distributed_model(original_model, mode)\n if strategy:\n orig_model_weights = original_model.get_weights()\n first_model = strategy.unwrap(distributed_model)[0]\n set_weights(strategy, first_model, orig_model_weights)", + "docstring": "Copies weights from original model to distributed models.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py", + "ast_data": "FunctionDef name:_copy_weights_to_distributed_model arg:original_model arg:mode arguments arg arg Assign Assign Call If Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "list_stack", + "source_code": "def list_stack(list_, opts):\n assert isinstance(opts, ListStackOpts)\n if isinstance(list_, tensor_array_ops.TensorArray):\n return 
_tf_tensorarray_stack(list_)\n elif tensor_util.is_tf_type(list_):\n if list_.dtype == dtypes.variant:\n return _tf_tensor_list_stack(list_, opts)\n else:\n return list_\n else:\n return _py_list_stack(list_, opts)", + "docstring": "The list stack function. This does not have a direct correspondent in Python. The closest idiom to this is tf.append or np.stack. It's different from those in the sense that it accepts a Tensor list, rather than a list of tensors. It can also accept TensorArray. When the target is anything else, the dispatcher will rely on ctx.original_call for fallback. Args: list_: An entity that supports append semantics. opts: A ListStackOpts object. Returns: The output of the stack operation, typically a Tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py", + "ast_data": "FunctionDef name:list_stack arg:list_ arg:opts arguments arg arg Call If Call Return return:yes Call If Call If Compare Return return:yes Call Return return:yes Return return:yes Call" + }, + { + "library": "pandas", + "name": "MultiIndexPyIntEngine", + "source_code": "class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine):\n _base = libindex.ObjectEngine\n _codes_dtype = 'object'", + "docstring": "Manages a MultiIndex by mapping label combinations to positive integers. This class manages those (extreme) cases in which the number of possible label combinations overflows the 64 bits integers, and uses an ObjectEngine containing Python integers.", + "type": "class", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "ClassDef name:MultiIndexPyIntEngine Assign Assign" + }, + { + "library": "tensorflow", + "name": "indexed_case", + "source_code": "def indexed_case(branch_index, branch_fns, name='indexed_case', lower_using_switch_merge=None):\n if isinstance(branch_index, int):\n raise TypeError('branch_index must not be a Python int', branch_index)\n with ops.name_scope(name) as scope:\n branch_names = [util.unique_fn_name(scope, 'branch{}'.format(b)) for b in range(len(branch_fns))]\n add_control_dependencies = ops.get_default_graph()._add_control_dependencies\n branch_index = ops.convert_to_tensor(branch_index, name='branch_index')\n branch_graphs = []\n for branch_name, branch_fn in zip(branch_names, branch_fns):\n branch_graphs.append(func_graph_module.func_graph_from_py_func(branch_name, branch_fn, [], {}, func_graph=util.CondBranchFuncGraph(branch_name, collections=ops.get_default_graph()._collections), add_control_dependencies=add_control_dependencies, op_return_value=branch_index))\n verify_captures(_CASE, branch_graphs)\n return _build_case(branch_index, branch_graphs, [g.external_captures for g in branch_graphs], name=scope, lower_using_switch_merge=lower_using_switch_merge)", + "docstring": "Like conv_v2, except emits a Case op instead of an If.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py", + "ast_data": "FunctionDef name:indexed_case arg:branch_index arg:branch_fns arg:name arg:lower_using_switch_merge arguments arg arg arg arg If Call Raise Call With Call Assign Call Call Call Call Assign Call Assign Call Assign For Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "check_convergence", + "source_code": "def check_convergence(self, X, y, sample_weight):\n if self.verbose:\n print(' Check Convergence')\n g_max_abs = np.max(np.abs(self.gradient))\n check = g_max_abs <= self.tol\n if 
self.verbose:\n print(f' 1. max |gradient| {g_max_abs} <= {self.tol} {check}')\n if not check:\n return\n d2 = self.coef_newton @ self.hessian @ self.coef_newton\n check = 0.5 * d2 <= self.tol\n if self.verbose:\n print(f' 2. Newton decrement {0.5 * d2} <= {self.tol} {check}')\n if not check:\n return\n if self.verbose:\n loss_value = self.linear_loss.loss(coef=self.coef, X=X, y=y, sample_weight=sample_weight, l2_reg_strength=self.l2_reg_strength, n_threads=self.n_threads)\n print(f' Solver did converge at loss = {loss_value}.')\n self.converged = True", + "docstring": "Check for convergence. Sets self.converged.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\_newton_solver.py", + "ast_data": "FunctionDef name:check_convergence arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg If Call Assign Call Call Assign Compare If Call If Return return:no Assign Assign Compare If Call If Return return:no If Assign Call Call Assign" + }, + { + "library": "matplotlib", + "name": "scroll_event_windows", + "source_code": "def scroll_event_windows(self, event):\n w = event.widget.winfo_containing(event.x_root, event.y_root)\n if w != self._tkcanvas:\n return\n x = self._tkcanvas.canvasx(event.x_root - w.winfo_rootx())\n y = self.figure.bbox.height - self._tkcanvas.canvasy(event.y_root - w.winfo_rooty())\n step = event.delta / 120\n MouseEvent('scroll_event', self, x, y, step=step, modifiers=self._mpl_modifiers(event), guiEvent=event)._process()", + "docstring": "MouseWheel event processor", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\_backend_tk.py", + "ast_data": "FunctionDef name:scroll_event_windows arg:self arg:event arguments arg arg Assign Call If Compare Return return:no Assign Call Call Assign Call Call Assign Call Call Call" + }, + { + "library": "pandas", + "name": "map", + "source_code": "def map(self, mapper, na_action: Literal['ignore'] | None=None):\n from pandas.core.indexes.multi import MultiIndex\n new_values = self._map_values(mapper, na_action=na_action)\n if new_values.size and isinstance(new_values[0], tuple):\n if isinstance(self, MultiIndex):\n names = self.names\n elif self.name:\n names = [self.name] * len(new_values[0])\n else:\n names = None\n return MultiIndex.from_tuples(new_values, names=names)\n dtype = None\n if not new_values.size:\n dtype = self.dtype\n same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type\n if same_dtype:\n new_values = maybe_cast_pointwise_result(new_values, self.dtype, same_dtype=same_dtype)\n return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name)", + "docstring": "Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- Union[Index, MultiIndex] The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned. See Also -------- Index.where : Replace values where the condition is False. 
Examples -------- >>> idx = pd.Index([1, 2, 3]) >>> idx.map({1: \"a\", 2: \"b\", 3: \"c\"}) Index(['a', 'b', 'c'], dtype='object') Using with a function: >>> idx = pd.Index([1, 2, 3]) >>> idx.map(\"I am a {}\".format) Index(['I am a 1', 'I am a 2', 'I am a 3'], dtype='object') >>> idx = pd.Index([\"a\", \"b\", \"c\"]) >>> idx.map(lambda x: x.upper()) Index(['A', 'B', 'C'], dtype='object')", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:map arg:self arg:mapper arg:na_action arguments arg arg arg Assign Call If BoolOp Call If Call Assign If Assign Call Assign Return return:yes Call Assign If Assign Assign Compare Call If Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "maybe_lift_tracked_freevar_to_input", + "source_code": "def maybe_lift_tracked_freevar_to_input(self, arg):\n if not isinstance(arg, torch.fx.Proxy):\n if isinstance(arg, slice):\n return slice(*(self.maybe_lift_tracked_freevar_to_input(sub_arg) for sub_arg in (arg.start, arg.stop, arg.step)))\n else:\n return arg\n elif arg.tracer == self:\n return arg\n return self.lift_tracked_freevar_to_input(arg)", + "docstring": "If arg is a free variable, then lift it to be an input. Returns the new lifted arg (if arg was a freevar), else the original arg.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\output_graph.py", + "ast_data": "FunctionDef name:maybe_lift_tracked_freevar_to_input arg:self arg:arg arguments arg arg If Call If Call Return return:yes Call Call Return return:yes If Compare Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "create_symfloatnode", + "source_code": "@record_shapeenv_event()\ndef create_symfloatnode(self, sym: sympy.Expr, *, hint: Optional[int], source: Optional[Source]=None) -> FloatLikeType:\n if self._translation_validation_enabled and source is not None:\n symbol = self._create_symbol_for_source(source)\n assert symbol is not None\n fx_node = self._create_fx_placeholder_and_z3var(symbol, float)\n self._add_assertion(sympy.Eq(symbol, sym))\n else:\n fx_node = None\n out: FloatLikeType\n if isinstance(sym, sympy.Float):\n if hint is not None:\n assert float(sym) == hint\n out = float(sym)\n else:\n if free_unbacked_symbols(sym):\n assert hint is None, sym\n out = SymFloat(SymNode(sym, self, float, hint, fx_node=fx_node))\n return out", + "docstring": "Create a SymFloat value from a symbolic expression", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:create_symfloatnode arg:self arg:sym arguments arg arg arg arg If BoolOp Compare Assign Call Compare Assign Call Call Call Assign If Call If Compare Compare Call Assign Call If Call Compare Assign Call Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "create_container", + "source_code": "def create_container(self, X_output, X_original, columns, inplace=False):\n pass", + "docstring": "Create container from with additional metadata. Parameters ---------- X_output : {ndarray, dataframe} Data to wrap. X_original : {ndarray, dataframe} Original input dataframe. This is used to extract the metadata that should be passed to , e.g. pandas row index. columns : callable, ndarray, or None The column names or a callable that returns the column names. The callable is useful if the column names require some computation. If , then no columns are passed to the container's constructor. 
inplace : bool, default=False Whether or not we intend to modify in-place. However, it does not guarantee that we return the same object if the in-place operation is not possible. Returns ------- wrapped_output : container_type wrapped into the container type.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\utils\\_set_output.py", + "ast_data": "FunctionDef name:create_container arg:self arg:X_output arg:X_original arg:columns arg:inplace arguments arg arg arg arg arg" + }, + { + "library": "tensorflow", + "name": "cholesky", + "source_code": "def cholesky(self, name: str='cholesky') -> 'LinearOperator':\n if not self._can_use_cholesky():\n raise ValueError('Cannot take the Cholesky decomposition: Not a positive definite self adjoint matrix.')\n with self._name_scope(name):\n return self._linop_cholesky()", + "docstring": "Returns a Cholesky factor as a . Given representing this , if is positive definite self-adjoint, return , where , i.e. the cholesky decomposition. Args: name: A name for this . Returns: which represents the lower triangular matrix in the Cholesky decomposition. Raises: ValueError: When the is not hinted to be positive definite and self adjoint.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:cholesky arg:self arg:name arguments arg arg If Call Raise Call With Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "exit_finally_section", + "source_code": "def exit_finally_section(self, section_id):\n assert section_id not in self.pending_finally_sections, 'Empty finally?'\n self.finally_section_subgraphs[section_id][1] = self.leaves\n if not self.finally_section_has_direct_flow[section_id]:\n self.leaves = set()\n del self.finally_section_has_direct_flow[section_id]", + "docstring": "Exits a finally section.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py", + "ast_data": "FunctionDef name:exit_finally_section arg:self arg:section_id arguments arg arg Compare Assign If Assign Call" + }, + { + "library": "cryptography", + "name": "public_bytes", + "source_code": "def public_bytes(self) -> bytes:\n raise NotImplementedError(f'public_bytes is not implemented for extension type {self!r}')", + "docstring": "Serializes the extension type to DER.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\x509\\extensions.py", + "ast_data": "FunctionDef name:public_bytes arg:self arguments arg Raise Call" + }, + { + "library": "cherrypy", + "name": "header", + "source_code": "def header(self):\n return '
Generators rule!
'", + "docstring": "Render HTML layout header.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut08_generators_and_yield.py", + "ast_data": "FunctionDef name:header arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "mark_buffer_mutated", + "source_code": "def mark_buffer_mutated(self, name: str) -> None:\n assert isinstance(name, str)\n self.mutated_buffers.add(name)\n if name not in self.name_to_users:\n return\n for user in self.name_to_users[name]:\n user.realize()", + "docstring": "When a buffer is mutated we need to make sure all the reads to the old version are realized before the mutation happens.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\graph.py", + "ast_data": "FunctionDef name:mark_buffer_mutated arg:self arg:name arguments arg arg Call Call If Compare Return return:no For Call" + }, + { + "library": "tensorflow", + "name": "StackTraceFilter", + "source_code": "class StackTraceFilter(StackTraceTransform):\n _stack_dict = _source_filter_stacks\n\n def __init__(self):\n self.internal_set = _tf_stack.PyBindFileSet()\n\n def update(self):\n self.internal_set.update_to(set(self.get_filtered_filenames()))\n\n def get_filtered_filenames(self):\n raise NotImplementedError('subclasses need to override this')", + "docstring": "Allows filtering traceback information by removing superfluous frames.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_stack.py", + "ast_data": "ClassDef name:StackTraceFilter Assign FunctionDef name:__init__ arg:self arguments arg Assign Call FunctionDef name:update arg:self arguments arg Call Call Call FunctionDef name:get_filtered_filenames arg:self arguments arg Raise Call" + }, + { + "library": "pandas", + "name": "_from_arrays", + "source_code": "@classmethod\ndef _from_arrays(cls, arrays, columns, index, dtype: Dtype | None=None, verify_integrity: bool=True) -> Self:\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n columns = ensure_index(columns)\n if len(columns) != len(arrays):\n raise ValueError('len(columns) must match len(arrays)')\n mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity)\n return cls._from_mgr(mgr, axes=mgr.axes)", + "docstring": "Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that and are ensured to be an Index object. 
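A minimal usage sketch for the tensorflow LinearOperator.cholesky record above; the concrete operator class, the matrix values, and the hint flags are illustrative assumptions and not part of that record.

import tensorflow as tf

# cholesky() requires the self-adjoint / positive-definite hints, otherwise it raises.
operator = tf.linalg.LinearOperatorFullMatrix(
    [[4.0, 0.0], [0.0, 9.0]],
    is_self_adjoint=True,
    is_positive_definite=True,
)
chol = operator.cholesky()   # lower-triangular factor, itself a LinearOperator
print(chol.to_dense())       # [[2., 0.], [0., 3.]]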
Returns ------- DataFrame", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:_from_arrays arg:cls arg:arrays arg:columns arg:index arg:dtype arg:verify_integrity arguments arg arg arg arg arg arg If Compare Assign Call Assign Call If Compare Call Call Raise Call Assign Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "byte_bounds", + "source_code": "@set_module('numpy.lib.array_utils')\ndef byte_bounds(a):\n ai = a.__array_interface__\n a_data = ai['data'][0]\n astrides = ai['strides']\n ashape = ai['shape']\n bytes_a = asarray(a).dtype.itemsize\n a_low = a_high = a_data\n if astrides is None:\n a_high += a.size * bytes_a\n else:\n for shape, stride in zip(ashape, astrides):\n if stride < 0:\n a_low += (shape - 1) * stride\n else:\n a_high += (shape - 1) * stride\n a_high += bytes_a\n return (a_low, a_high)", + "docstring": "Returns pointers to the end-points of an array. Parameters ---------- a : ndarray Input array. It must conform to the Python-side of the array interface. Returns ------- (low, high) : tuple of 2 integers The first integer is the first byte of the array, the second integer is just past the last byte of the array. If is not contiguous it will not use every byte between the (, ) values. Examples -------- >>> import numpy as np >>> I = np.eye(2, dtype='f'); I.dtype dtype('float32') >>> low, high = np.lib.array_utils.byte_bounds(I) >>> high - low == I.size*I.itemsize True >>> I = np.eye(2); I.dtype dtype('float64') >>> low, high = np.lib.array_utils.byte_bounds(I) >>> high - low == I.size*I.itemsize True", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_array_utils_impl.py", + "ast_data": "FunctionDef name:byte_bounds arg:a arguments arg Assign Assign Assign Assign Assign Call Assign If Compare For Call If Compare Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_default_bbox_extra_artists", + "source_code": "def get_default_bbox_extra_artists(self):\n artists = self.get_children()\n for axis in self._axis_map.values():\n artists.remove(axis)\n if not (self.axison and self._frameon):\n for spine in self.spines.values():\n artists.remove(spine)\n artists.remove(self.title)\n artists.remove(self._left_title)\n artists.remove(self._right_title)\n noclip = (_AxesBase, maxis.Axis, offsetbox.AnnotationBbox, offsetbox.OffsetBox)\n return [a for a in artists if a.get_visible() and a.get_in_layout() and (isinstance(a, noclip) or not a._fully_clipped_to_axes())]", + "docstring": "Return a default list of artists that are used for the bounding box calculation. 
Artists are excluded either by not being visible or ``.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:get_default_bbox_extra_artists arg:self arguments arg Assign Call For Call Call If BoolOp For Call Call Call Call Call Assign Return return:yes BoolOp Call Call BoolOp Call Call" + }, + { + "library": "sphinx", + "name": "merge_info_from", + "source_code": "def merge_info_from(self, docnames: Iterable[str], other: BuildEnvironment, app: Sphinx) -> None:\n docnames = frozenset(docnames)\n for docname in docnames:\n self.all_docs[docname] = other.all_docs[docname]\n self.included[docname] = other.included[docname]\n if docname in other.reread_always:\n self.reread_always.add(docname)\n self.domains._merge_domain_data(docnames, other.domaindata)\n self.events.emit('env-merge-info', self, docnames, other)", + "docstring": "Merge global information gathered about *docnames* while reading them from the *other* environment. This possibly comes from a parallel build process.", + "type": "method", + "file_path": "sphinx\\sphinx\\environment\\__init__.py", + "ast_data": "FunctionDef name:merge_info_from arg:self arg:docnames arg:other arg:app arguments arg arg arg arg Assign Call For Assign Assign If Compare Call Call Call" + }, + { + "library": "pytorch", + "name": "forward", + "source_code": "def forward(self, outputs, targets):\n outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}\n indices = self.matcher(outputs_without_aux, targets)\n num_boxes = sum((len(t['labels']) for t in targets))\n num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n if 'aux_outputs' in outputs:\n for i, aux_outputs in enumerate(outputs['aux_outputs']):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n if loss == 'masks':\n continue\n kwargs = {}\n if loss == 'labels':\n kwargs = {'log': False}\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n l_dict = {k + f'_{i}': v for k, v in l_dict.items()}\n losses.update(l_dict)\n return losses", + "docstring": "This performs the loss computation. Parameters: outputs: dict of tensors, see the output specification of the model for the format targets: list of dicts, such that len(targets) == batch_size. 
The expected keys in each dict depends on the losses applied, see each loss' doc", + "type": "method", + "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py", + "ast_data": "FunctionDef name:forward arg:self arg:outputs arg:targets arguments arg arg arg Assign Call Compare Assign Call Assign Call Call Assign Call Call Call Call If Call Call Assign Call Call Call Assign For Call Call If Compare For Call Assign Call For If Compare Assign If Compare Assign Assign Call Assign Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_n_features_out", + "source_code": "@property\ndef _n_features_out(self):\n return self.components_.shape[0]", + "docstring": "Number of transformed output features.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_factor_analysis.py", + "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "z", + "source_code": "@property\ndef z(self) -> Tensor:\n return self.data[..., 3]", + "docstring": "Return the :math: with shape :math:.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\quaternion.py", + "ast_data": "FunctionDef name:z arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "freqresp", + "source_code": "def freqresp(self, w=None, n=10000):\n return freqresp(self, w=w, n=n)", + "docstring": "Calculate the frequency response of a continuous-time system. Returns a 2-tuple containing arrays of frequencies [rad/s] and complex magnitude. See for details.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:freqresp arg:self arg:w arg:n arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "reset", + "source_code": "def reset():\n torch._C._lazy._reset_metrics()", + "docstring": "Resets all metric counters.", + "type": "function", + "file_path": "pytorch\\torch\\_lazy\\metrics.py", + "ast_data": "FunctionDef name:reset arguments Call" + }, + { + "library": "matplotlib", + "name": "update_position", + "source_code": "def update_position(self, loc):\n self.tick1line.set_xdata((loc,))\n self.tick2line.set_xdata((loc,))\n self.gridline.set_xdata((loc,))\n self.label1.set_x(loc)\n self.label2.set_x(loc)\n self._loc = loc\n self.stale = True", + "docstring": "Set the location of tick in data coords with scalar *loc*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:update_position arg:self arg:loc arguments arg arg Call Call Call Call Call Assign Assign" + }, + { + "library": "numpy", + "name": "masked_where", + "source_code": "def masked_where(condition, a, copy=True):\n cond = make_mask(condition, shrink=False)\n a = np.array(a, copy=copy, subok=True)\n cshape, ashape = (cond.shape, a.shape)\n if cshape and cshape != ashape:\n raise IndexError('Inconsistent shape between the condition and the input (got %s and %s)' % (cshape, ashape))\n if hasattr(a, '_mask'):\n cond = mask_or(cond, a._mask)\n cls = type(a)\n else:\n cls = MaskedArray\n result = a.view(cls)\n result.mask = _shrink_mask(cond)\n if not copy and hasattr(a, '_mask') and (getmask(a) is nomask):\n a._mask = result._mask.view()\n return result", + "docstring": "Mask an array where a condition is met. Return as an array masked where is True. Any masked values of or are also masked in the output. Parameters ---------- condition : array_like Masking condition. 
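A short sketch of the scipy freqresp method recorded above, assuming the standard scipy.signal.lti constructor; the first-order transfer function is only an example system.

import numpy as np
from scipy import signal

sys = signal.lti([1.0], [1.0, 1.0])   # H(s) = 1 / (s + 1)
w, H = sys.freqresp(n=200)            # frequencies [rad/s] and complex magnitude
gain_db = 20 * np.log10(np.abs(H))    # magnitude response in dB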
When tests floating point values for equality, consider using `aaaconditionconditiona` contain masked values. >>> a = np.arange(4) >>> a = ma.masked_where(a == 2, a) >>> a masked_array(data=[0, 1, --, 3], mask=[False, False, True, False], fill_value=999999) >>> b = np.arange(4) >>> b = ma.masked_where(b == 0, b) >>> b masked_array(data=[--, 1, 2, 3], mask=[ True, False, False, False], fill_value=999999) >>> ma.masked_where(a == 3, b) masked_array(data=[--, 1, --, --], mask=[ True, False, True, True], fill_value=999999)", + "type": "function", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:masked_where arg:condition arg:a arg:copy arguments arg arg arg Assign Call Assign Call Assign If BoolOp Compare Raise Call If Call Assign Call Assign Call Assign Assign Call Assign Call If BoolOp Call Compare Call Assign Call Return return:yes" + }, + { + "library": "cryptography", + "name": "verify", + "source_code": "@abc.abstractmethod\ndef verify(self, signature: Buffer, data: Buffer) -> None:\n pass", + "docstring": "Verify the signature.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py", + "ast_data": "FunctionDef name:verify arg:self arg:signature arg:data arguments arg arg arg" + }, + { + "library": "tensorflow", + "name": "get_distributed_datasets_from_function", + "source_code": "def get_distributed_datasets_from_function(dataset_fn, input_workers, input_contexts, strategy, options=None, build=True, replica_order=None):\n if options is not None and options.experimental_replication_mode != input_lib.InputReplicationMode.PER_REPLICA and options.experimental_place_dataset_on_device:\n raise ValueError('When `experimental_place_dataset_on_device` is set for dataset placement, you must also specify `PER_REPLICA` for the replication mode')\n if options is not None and options.experimental_replication_mode == input_lib.InputReplicationMode.PER_REPLICA and options.experimental_fetch_to_device and options.experimental_place_dataset_on_device:\n raise ValueError('`experimental_place_dataset_on_device` can not be set to True when experimental_fetch_to_device is True and replication mode is set to `PER_REPLICA`')\n if tf2.enabled():\n return input_lib.DistributedDatasetsFromFunction(input_workers, strategy, input_contexts=input_contexts, dataset_fn=dataset_fn, options=options, build=build, replica_order=replica_order)\n else:\n return input_lib_v1.DistributedDatasetsFromFunctionV1(input_workers, strategy, input_contexts, dataset_fn, options)", + "docstring": "Returns a distributed dataset from the given input function. This is a common function that is used by all strategies to return a distributed dataset. The distributed dataset instance returned is different depending on if we are in a TF 1 or TF 2 context. The distributed dataset instances returned differ from each other in the APIs supported by each of them. Args: dataset_fn: a function that returns a tf.data.Dataset instance. input_workers: an InputWorkers object which specifies devices on which iterators should be created. input_contexts: A list of instances to be passed to call(s) to . Length and order should match worker order in . strategy: a object, used to run all-reduce to handle last partial batch. options: Default is None. used to control options on how this dataset is distributed. build: whether to build underlying datasets when a is created. This is only useful for now. 
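To make the abstract cryptography verify method above concrete, a hedged sketch using the Ed25519 key classes from the same module; the message bytes are placeholders.

from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

private_key = Ed25519PrivateKey.generate()
public_key = private_key.public_key()

signature = private_key.sign(b"payload")
# Returns None on success; raises InvalidSignature if data or signature were tampered with.
public_key.verify(signature, b"payload")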
replica_order: the order of the replicas, which will be used to reorder the iterators to match the device order. Returns: A distributed dataset instance. Raises: ValueError: if and are not consistent", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_util.py", + "ast_data": "FunctionDef name:get_distributed_datasets_from_function arg:dataset_fn arg:input_workers arg:input_contexts arg:strategy arg:options arg:build arg:replica_order arguments arg arg arg arg arg arg arg If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "LecunNormal", + "source_code": "class LecunNormal(VarianceScaling):\n\n def __init__(self, seed=None):\n super(LecunNormal, self).__init__(scale=1.0, mode='fan_in', distribution='truncated_normal', seed=seed)\n\n def get_config(self):\n return {'seed': self.seed}", + "docstring": "Lecun normal initializer. Also available via the shortcut function . Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Draws samples from a truncated normal distribution centered on 0 with where is the number of input units in the weight tensor. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.LecunNormal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.LecunNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. Used to seed the random generator. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017] ( ([pdf] ( - Efficient Backprop, [Lecun et al., 1998](", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py", + "ast_data": "ClassDef name:LecunNormal FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, *args, zs=0, zdir='z', depthshade=None, depthshade_minalpha=None, axlim_clip=False, **kwargs):\n if depthshade is None:\n depthshade = rcParams['axes3d.depthshade']\n if depthshade_minalpha is None:\n depthshade_minalpha = rcParams['axes3d.depthshade_minalpha']\n self._depthshade = depthshade\n self._depthshade_minalpha = depthshade_minalpha\n super().__init__(*args, **kwargs)\n self.set_3d_properties(zs, zdir, axlim_clip)", + "docstring": "Create a collection of flat 3D patches with its normal vector pointed in *zdir* direction, and located at *zs* on the *zdir* axis. 'zs' can be a scalar or an array-like of the same length as the number of patches in the collection. Constructor arguments are the same as for :class:. In addition, keywords *zs=0* and *zdir='z'* are available. The keyword argument *depthshade* is available to indicate whether or not to shade the patches in order to give the appearance of depth (default is *True*). This is typically desired in scatter plots. 
*depthshade_minalpha* sets the minimum alpha value applied by depth-shading.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg arg If Compare Assign If Compare Assign Assign Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "_get_module_path_and_prefix", + "source_code": "def _get_module_path_and_prefix(obs_node: Node, node_name_to_scope: dict[str, tuple[str, type]], node_name_to_qconfig: dict[str, QConfigAny]) -> tuple[str, str]:\n observed_node = obs_node.args[0]\n assert isinstance(observed_node, Node), f'Expecting observed node to be a Node, but got {observed_node}'\n is_input_observer_only = node_name_to_qconfig[observed_node.name] is None if observed_node.name in node_name_to_qconfig else None\n if is_input_observer_only:\n users = list(obs_node.users)\n first_linear_use_or_first_use = users[0] if users else None\n linear_node = None\n for n in users:\n if n.op == 'call_function' and n.target == torch.nn.functional.linear:\n linear_node = n\n break\n if linear_node:\n first_linear_use_or_first_use = linear_node\n prefix = '_input'\n else:\n first_linear_use_or_first_use = observed_node\n prefix = ''\n if first_linear_use_or_first_use and first_linear_use_or_first_use.name in node_name_to_scope:\n module_path, _ = node_name_to_scope[first_linear_use_or_first_use.name]\n else:\n module_path = ''\n return (module_path, prefix)", + "docstring": "Given and observer node, get the or the fully qualified name for the submodule containing the observed node, also return a prefix of \"_input\" when the observed node is an input of a F.linear op, and not the output of another quantized op. TODO: this logic is hacky, we should think about how to remove it or make it more general", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\convert.py", + "ast_data": "FunctionDef name:_get_module_path_and_prefix arg:obs_node arg:node_name_to_scope arg:node_name_to_qconfig arguments arg arg arg Assign Call Assign Compare Compare If Assign Call Assign Assign For If BoolOp Compare Compare Assign If Assign Assign Assign Assign If BoolOp Compare Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, base, offset):\n self._base = base\n self.offset = offset", + "docstring": "Place ticks every *base* data point, starting at *offset*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:base arg:offset arguments arg arg arg Assign Assign" + }, + { + "library": "tensorflow", + "name": "tensorflow_version", + "source_code": "def tensorflow_version(self):\n return self._reader.tensorflow_version()", + "docstring": "TensorFlow version used in the debugged TensorFlow program. Note: this is not necessarily the same as the version of TensorFlow used to load the DebugEvent file set. 
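A small sketch for the matplotlib IndexLocator constructor recorded above; the bar data and the base/offset values are arbitrary choices for illustration.

import matplotlib.pyplot as plt
from matplotlib.ticker import IndexLocator

fig, ax = plt.subplots()
ax.bar(range(12), range(12))
# Tick every 3rd data point, starting at index 1 -> ticks at 1, 4, 7, 10.
ax.xaxis.set_major_locator(IndexLocator(base=3, offset=1))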
Returns: TensorFlow version used by the debugged program, as a .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:tensorflow_version arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "to_feather", + "source_code": "@doc(storage_options=_shared_docs['storage_options'])\ndef to_feather(df: DataFrame, path: FilePath | WriteBuffer[bytes], storage_options: StorageOptions | None=None, **kwargs: Any) -> None:\n import_optional_dependency('pyarrow')\n from pyarrow import feather\n if not isinstance(df, DataFrame):\n raise ValueError('feather only support IO with DataFrames')\n with get_handle(path, 'wb', storage_options=storage_options, is_text=False) as handles:\n feather.write_feather(df, handles.handle, **kwargs)", + "docstring": "Write a DataFrame to the binary Feather format. Parameters ---------- df : DataFrame path : str, path object, or file-like object {storage_options} **kwargs : Additional keywords passed to .", + "type": "function", + "file_path": "pandas\\pandas\\io\\feather_format.py", + "ast_data": "FunctionDef name:to_feather arg:df arg:path arg:storage_options arguments arg arg arg arg Call If Call Raise Call With Call Call Call" + }, + { + "library": "kornia", + "name": "ConvSoftArgmax3d", + "source_code": "class ConvSoftArgmax3d(Module):\n\n def __init__(self, kernel_size: tuple[int, int, int]=(3, 3, 3), stride: tuple[int, int, int]=(1, 1, 1), padding: tuple[int, int, int]=(1, 1, 1), temperature: Tensor | float=1.0, normalized_coordinates: bool=False, eps: float=1e-08, output_value: bool=True, strict_maxima_bonus: float=0.0) -> None:\n super().__init__()\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.temperature = temperature\n self.normalized_coordinates = normalized_coordinates\n self.eps = eps\n self.output_value = output_value\n self.strict_maxima_bonus = strict_maxima_bonus\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}, temperature={self.temperature}, normalized_coordinates={self.normalized_coordinates}, eps={self.eps}, strict_maxima_bonus={self.strict_maxima_bonus}, output_value={self.output_value})'\n\n def forward(self, x: Tensor) -> Tensor | tuple[Tensor, Tensor]:\n return conv_soft_argmax3d(x, self.kernel_size, self.stride, self.padding, self.temperature, self.normalized_coordinates, self.eps, self.output_value, self.strict_maxima_bonus)", + "docstring": "Module that calculates soft argmax 3d per window. See :func: for details.", + "type": "class", + "file_path": "kornia\\kornia\\geometry\\subpix\\spatial_soft_argmax.py", + "ast_data": "ClassDef name:ConvSoftArgmax3d FunctionDef name:__init__ arg:self arg:kernel_size arg:stride arg:padding arg:temperature arg:normalized_coordinates arg:eps arg:output_value arg:strict_maxima_bonus arguments arg arg arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes Call" + }, + { + "library": "authlib", + "name": "get_audiences", + "source_code": "def get_audiences(self, request):\n client = request.client\n return [client.get_client_id()]", + "docstring": "Parse value for id_token, default value is client id. 
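A usage sketch for the pandas to_feather entry above, going through the public DataFrame.to_feather wrapper; the file name is a placeholder and the optional pyarrow dependency is assumed to be installed.

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
df.to_feather("frame.feather")                  # written via pyarrow
roundtripped = pd.read_feather("frame.feather")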
Developers MAY rewrite this method to provide a customized audience value.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\core\\grants\\implicit.py", + "ast_data": "FunctionDef name:get_audiences arg:self arg:request arguments arg arg Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "__init__", + "source_code": "def __init__(self, dim=None, seed=None):\n self._dist = ortho_group_gen(seed)\n self.dim = self._dist._process_parameters(dim)", + "docstring": "Create a frozen O(N) distribution. Parameters ---------- dim : scalar Dimension of matrices seed : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `seedseed` instance then that instance is used. Examples -------- >>> from scipy.stats import ortho_group >>> g = ortho_group(5) >>> x = g.rvs()", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:dim arg:seed arguments arg arg arg Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, initial_learning_rate, decay_steps, decay_rate, staircase=False, name=None):\n super(ExponentialDecay, self).__init__()\n self.initial_learning_rate = initial_learning_rate\n self.decay_steps = decay_steps\n self.decay_rate = decay_rate\n self.staircase = staircase\n self.name = name", + "docstring": "Applies exponential decay to the learning rate. Args: initial_learning_rate: A scalar or or a Python number. The initial learning rate. decay_steps: A scalar or or a Python number. Must be positive. See the decay computation above. decay_rate: A scalar or or a Python number. The decay rate. staircase: Boolean. If decay the learning rate at discrete intervals name: String. Optional name of the operation. Defaults to 'ExponentialDecay'.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\learning_rate_schedule.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:initial_learning_rate arg:decay_steps arg:decay_rate arg:staircase arg:name arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "DeterminePeakMemoryUsage", + "source_code": "def DeterminePeakMemoryUsage(self, item):\n return tf_cluster.TF_DeterminePeakMemoryUsage(item.tf_item, self._tf_cluster)", + "docstring": "Returns a snapshot of the peak memory usage. Args: item: The item for which to measure the costs. 
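A hedged sketch for the ExponentialDecay schedule recorded above, using the public tf.keras.optimizers.schedules endpoint; the hyperparameter values are illustrative.

import tensorflow as tf

schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.1, decay_steps=1000, decay_rate=0.96, staircase=True)
# The schedule is callable on the current step: lr = 0.1 * 0.96 ** (step // 1000).
print(float(schedule(2500)))   # 0.1 * 0.96 ** 2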
Returns: A hashtable indexed by device name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\grappler\\cluster.py", + "ast_data": "FunctionDef name:DeterminePeakMemoryUsage arg:self arg:item arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "tuple_to_qfont", + "source_code": "def tuple_to_qfont(tup):\n if not (isinstance(tup, tuple) and len(tup) == 4 and font_is_installed(tup[0]) and isinstance(tup[1], Integral) and isinstance(tup[2], bool) and isinstance(tup[3], bool)):\n return None\n font = QtGui.QFont()\n family, size, italic, bold = tup\n font.setFamily(family)\n font.setPointSize(size)\n font.setItalic(italic)\n font.setBold(bold)\n return font", + "docstring": "Create a QFont from tuple: (family [string], size [int], italic [bool], bold [bool])", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\qt_editor\\_formlayout.py", + "ast_data": "FunctionDef name:tuple_to_qfont arg:tup arguments arg If BoolOp Call Compare Call Call Call Call Call Return return:no Assign Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "__len__", + "source_code": "def __len__(self):\n return len(self.estimators_)", + "docstring": "Return the number of estimators in the ensemble.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py", + "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "sparse_segment_mean", + "source_code": "@tf_export(v1=['sparse.segment_mean', 'sparse_segment_mean'])\n@deprecation.deprecated_endpoints('sparse_segment_mean')\ndef sparse_segment_mean(data, indices, segment_ids, name=None, num_segments=None, sparse_gradient=False):\n if num_segments is not None:\n return gen_math_ops.sparse_segment_mean_with_num_segments(data=data, indices=indices, segment_ids=segment_ids, num_segments=num_segments, name=name, sparse_gradient=sparse_gradient)\n else:\n return gen_math_ops.sparse_segment_mean(data=data, indices=indices, segment_ids=segment_ids, name=name, sparse_gradient=sparse_gradient)", + "docstring": "Computes the mean along sparse segments of a tensor. Read [the section on segmentation]( for an explanation of segments. Like , but can have rank less than 's first dimension, selecting a subset of dimension 0, specified by . is allowed to have missing ids, in which case the output will be zeros at those indices. In those cases is used to determine the size of the output. Args: data: A with data that will be assembled in the output. indices: A 1-D with indices into . Has same rank as . segment_ids: A 1-D with indices into the output . Values should be sorted and can be repeated. name: A name for the operation (optional). num_segments: An optional int32 scalar. Indicates the size of the output . sparse_gradient: An optional . Defaults to . If , the gradient of this function will be sparse () instead of dense (). The sparse gradient will contain one non-zero row for each unique index in . 
Returns: A of the shape as data, except for dimension 0 which has size , the number of segments specified via or inferred for the last element in .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:sparse_segment_mean arg:data arg:indices arg:segment_ids arg:name arg:num_segments arg:sparse_gradient arguments arg arg arg arg arg arg If Compare Return return:yes Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "can_decode", + "source_code": "def can_decode(self, value):\n return value.HasField('type_spec_value')", + "docstring": "Returns true if can be decoded into a .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py", + "ast_data": "FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "set_per_process_memory_fraction", + "source_code": "def set_per_process_memory_fraction(fraction) -> None:\n if not isinstance(fraction, float):\n raise TypeError('Invalid type for fraction argument, must be `float`')\n if fraction < 0 or fraction > 2:\n raise ValueError(f'Invalid fraction value: {fraction}. Allowed range: 0~2')\n torch._C._mps_setMemoryFraction(fraction)", + "docstring": "Set memory fraction for limiting process's memory allocation on MPS device. The allowed value equals the fraction multiplied by recommended maximum device memory (obtained from Metal API device.recommendedMaxWorkingSetSize). If trying to allocate more than the allowed value in a process, it will raise an out of memory error in allocator. Args: fraction(float): Range: 0~2. Allowed memory equals total_memory * fraction. .. note:: Passing 0 to fraction means unlimited allocations (may cause system failure if out of memory). Passing fraction greater than 1.0 allows limits beyond the value returned from device.recommendedMaxWorkingSetSize.", + "type": "function", + "file_path": "pytorch\\torch\\mps\\__init__.py", + "ast_data": "FunctionDef name:set_per_process_memory_fraction arg:fraction arguments arg If Call Raise Call If BoolOp Compare Compare Raise Call Call" + }, + { + "library": "pandas", + "name": "_get_column_array", + "source_code": "def _get_column_array(self, i: int) -> ArrayLike:\n return self._mgr.iget_values(i)", + "docstring": "Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes).", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:_get_column_array arg:self arg:i arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "supports_complex", + "source_code": "def supports_complex(reduceOp: ReduceOp) -> bool:\n denyList = [ReduceOp.MAX, ReduceOp.MIN, ReduceOp.PRODUCT, ReduceOp.BAND, ReduceOp.BOR, ReduceOp.BXOR]\n return reduceOp not in denyList", + "docstring": "Return true if reduce ops is supported. 
False otherwise.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:supports_complex arg:reduceOp arguments arg Assign Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "test_function", + "source_code": "def test_function(iterator):\n return step_function(self, iterator)", + "docstring": "Runs an evaluation execution with one step.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", + "ast_data": "FunctionDef name:test_function arg:iterator arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "create_diagonal", + "source_code": "def create_diagonal(x: Array, /, *, offset: int=0, xp: ModuleType | None=None) -> Array:\n if xp is None:\n xp = array_namespace(x)\n if x.ndim == 0:\n err_msg = '`x` must be at least 1-dimensional.'\n raise ValueError(err_msg)\n x_shape = eager_shape(x)\n batch_dims = x_shape[:-1]\n n = x_shape[-1] + abs(offset)\n diag = xp.zeros((*batch_dims, n ** 2), dtype=x.dtype, device=_compat.device(x))\n target_slice = slice(offset if offset >= 0 else abs(offset) * n, min(n * (n - offset), diag.shape[-1]), n + 1)\n for index in ndindex(*batch_dims):\n diag = at(diag)[*index, target_slice].set(x[*index, slice(None)])\n return xp.reshape(diag, (*batch_dims, n, n))", + "docstring": "Construct a diagonal array. Parameters ---------- x : array An array having shape `xxoffset`). Examples -------- >>> import array_api_strict as xp >>> import array_api_extra as xpx >>> x = xp.asarray([2, 4, 8]) >>> xpx.create_diagonal(x, xp=xp) Array([[2, 0, 0], [0, 4, 0], [0, 0, 8]], dtype=array_api_strict.int64) >>> xpx.create_diagonal(x, offset=-2, xp=xp) Array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [2, 0, 0, 0, 0], [0, 4, 0, 0, 0], [0, 0, 8, 0, 0]], dtype=array_api_strict.int64)", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_funcs.py", + "ast_data": "FunctionDef name:create_diagonal arguments arg arg arg If Compare Assign Call If Compare Assign Raise Call Assign Call Assign Assign Call Assign Call Call Assign Call Compare Call Call For Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "Pathological", + "source_code": "class Pathological(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n self.global_optimum = [[0 for _ in range(self.N)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n vec = 0.5 + (sin(sqrt(100 * x[:-1] ** 2 + x[1:] ** 2)) ** 2 - 0.5) / (1.0 + 0.001 * (x[:-1] ** 2 - 2 * x[:-1] * x[1:] + x[1:] ** 2) ** 2)\n return sum(vec)", + "docstring": "Pathological objective function. This class defines the Pathological [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Pathological}}(x) = \\sum_{i=1}^{n -1} \\frac{\\sin^{2}\\left( \\sqrt{100 x_{i+1}^{2} + x_{i}^{2}}\\right) -0.5}{0.001 \\left(x_{i}^{2} - 2x_{i}x_{i+1} + x_{i+1}^{2}\\right)^{2} + 0.50} Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py", + "ast_data": "ClassDef name:Pathological FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "intersect_trust_region", + "source_code": "def intersect_trust_region(x, s, Delta):\n a = np.dot(s, s)\n if a == 0:\n raise ValueError('`s` is zero.')\n b = np.dot(x, s)\n c = np.dot(x, x) - Delta ** 2\n if c > 0:\n raise ValueError('`x` is not within the trust region.')\n d = np.sqrt(b * b - a * c)\n q = -(b + copysign(d, b))\n t1 = q / a\n t2 = c / q\n if t1 < t2:\n return (t1, t2)\n else:\n return (t2, t1)", + "docstring": "Find the intersection of a line with the boundary of a trust region. This function solves the quadratic equation with respect to t ||(x + s*t)||**2 = Delta**2. Returns ------- t_neg, t_pos : tuple of float Negative and positive roots. Raises ------ ValueError If is zero or is not within the trust region.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py", + "ast_data": "FunctionDef name:intersect_trust_region arg:x arg:s arg:Delta arguments arg arg arg Assign Call If Compare Raise Call Assign Call Assign Call If Compare Raise Call Assign Call Assign Call Assign Assign If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "stop", + "source_code": "def stop(self) -> None:\n self._stop()", + "docstring": "Stops the server. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while stopping the server.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py", + "ast_data": "FunctionDef name:stop arg:self arguments arg Call" + }, + { + "library": "pytorch", + "name": "_check_flat_params_on_expected_device", + "source_code": "def _check_flat_params_on_expected_device(state: _FSDPState, module: nn.Module):\n cpu_device = torch.device('cpu')\n for handle in traversal_utils._get_fsdp_handles(module):\n if not handle._offload_params and handle.flat_param.device != state.compute_device:\n raise RuntimeError(f'An FSDP-managed module unexpectedly has parameters on {handle.flat_param.device}. Make sure to move the module to {state.compute_device} before training.')\n elif handle._offload_params and handle.flat_param.device != cpu_device:\n raise RuntimeError(f'An FSDP-managed module with parameter CPU offloading enabled has parameters on {handle.flat_param.device}. 
Make sure to not move the module from CPU when offloading parameters.')", + "docstring": "Checks that all `` are on the expected device for *lazy initialization*.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_check_flat_params_on_expected_device arg:state arg:module arguments arg arg Assign Call For Call If BoolOp Compare Raise Call If BoolOp Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "unique_name", + "source_code": "def unique_name(self, name, mark_as_used=True) -> str:\n if self._name_stack:\n name = self._name_stack + '/' + name\n name_key = name.lower()\n i = self._names_in_use.get(name_key, 0)\n if mark_as_used:\n self._names_in_use[name_key] = i + 1\n if i > 0:\n base_name_key = name_key\n while name_key in self._names_in_use:\n name_key = '%s_%d' % (base_name_key, i)\n i += 1\n if mark_as_used:\n self._names_in_use[name_key] = 1\n name = '%s_%d' % (name, i - 1)\n return name", + "docstring": "Return a unique operation name for . Note: You rarely need to call directly. Most of the time you just need to create blocks to generate structured names. is used to generate structured names, separated by , to help identify operations when debugging a graph. Operation names are displayed in error messages reported by the TensorFlow runtime, and in various visualization tools such as TensorBoard. If is set to , which is the default, a new unique name is created and marked as in use. If it's set to , the unique name is returned without actually being marked as used. This is useful when the caller simply wants to know what the name to be created will be. Args: name: The name for an operation. mark_as_used: Whether to mark this name as being used. Returns: A string to be passed to that will be used to name the operation being created.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:unique_name arg:self arg:name arg:mark_as_used arguments arg arg arg If Assign Assign Call Assign Call If Assign If Compare Assign While Compare Assign If Assign Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "xkcd", + "source_code": "def xkcd(scale: float=1, length: float=100, randomness: float=2) -> ExitStack:\n if rcParams['text.usetex']:\n raise RuntimeError('xkcd mode is not compatible with text.usetex = True')\n stack = ExitStack()\n stack.callback(rcParams._update_raw, rcParams.copy())\n from matplotlib import patheffects\n rcParams.update({'font.family': ['xkcd', 'xkcd Script', 'Comic Neue', 'Comic Sans MS'], 'font.size': 14.0, 'path.sketch': (scale, length, randomness), 'path.effects': [patheffects.withStroke(linewidth=4, foreground='w')], 'axes.linewidth': 1.5, 'lines.linewidth': 2.0, 'figure.facecolor': 'white', 'grid.linewidth': 0.0, 'axes.grid': False, 'axes.unicode_minus': False, 'axes.edgecolor': 'black', 'xtick.major.size': 8, 'xtick.major.width': 3, 'ytick.major.size': 8, 'ytick.major.width': 3})\n return stack", + "docstring": "Turn on _ sketch-style drawing mode. This will only have an effect on things drawn after this function is called. For best results, install the _ font; xkcd fonts are not packaged with Matplotlib. Parameters ---------- scale : float, optional The amplitude of the wiggle perpendicular to the source line. length : float, optional The length of the wiggle along the line. randomness : float, optional The scale factor by which the length is shrunken or expanded. 
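A quick sketch of the Graph.unique_name behaviour described above; the base name "dense" is arbitrary.

import tensorflow as tf

g = tf.Graph()
print(g.unique_name("dense"))                      # 'dense'
print(g.unique_name("dense"))                      # 'dense_1'
print(g.unique_name("dense", mark_as_used=False))  # 'dense_2', name not reserved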
Notes ----- This function works by a number of rcParams, overriding those set before. If you want the effects of this function to be temporary, it can be used as a context manager, for example:: with plt.xkcd(): # This figure will be in XKCD-style fig1 = plt.figure() # ... # This figure will be in regular style fig2 = plt.figure()", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:xkcd arg:scale arg:length arg:randomness arguments arg arg arg If Raise Call Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "tensorflow_version", + "source_code": "def tensorflow_version(self):\n return self._tensorflow_version", + "docstring": "Get the version string of TensorFlow that the debugged program ran on.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:tensorflow_version arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "erfinv", + "source_code": "@tf_export('math.erfinv')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef erfinv(x, name=None):\n with ops.name_scope(name, 'erfinv', [x]):\n return gen_math_ops.erfinv(x)", + "docstring": "Compute inverse error function. Given , compute the inverse error function of . This function is the inverse of . Args: x: with type or . name: A name for the operation (optional). Returns: Inverse error function of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:erfinv arg:x arg:name arguments arg arg With Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "__init__", + "source_code": "def __init__(self, buffer: pa.Buffer, *, length: int) -> None:\n self._buffer = buffer\n self._length = length", + "docstring": "Handle pyarrow chunked arrays.", + "type": "method", + "file_path": "pandas\\pandas\\core\\interchange\\buffer.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:buffer arguments arg arg arg Assign Assign" + }, + { + "library": "pytorch", + "name": "try_finally", + "source_code": "def try_finally(self, code_options, cleanup: list[Instruction]):\n load_args = []\n if self.target_values:\n load_args = [create_load_const(val) for val in self.target_values]\n ctx_name = unique_id(f'___context_manager_{self.stack_index}')\n if ctx_name not in code_options['co_varnames']:\n code_options['co_varnames'] += (ctx_name,)\n for name in ['__enter__', '__exit__']:\n if name not in code_options['co_names']:\n code_options['co_names'] += (name,)\n create_ctx: list[Instruction] = []\n _initial_push_null(create_ctx)\n create_ctx.extend([*load_args, *create_call_function(len(load_args), False), create_instruction('STORE_FAST', argval=ctx_name)])\n\n def _template(ctx, dummy):\n ctx.__enter__()\n try:\n dummy\n finally:\n ctx.__exit__(None, None, None)\n setup_try_finally, epilogue = _bytecode_from_template_with_split(_template, self.stack_index, varname_map={'ctx': ctx_name})\n cleanup[:] = epilogue + cleanup\n return create_ctx + setup_try_finally", + "docstring": "Codegen based off of: load args enter context try: (rest) finally: exit context", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\resume_execution.py", + "ast_data": "FunctionDef name:try_finally arg:self arg:code_options arg:cleanup arguments arg arg arg Assign If Assign Call Assign Call If Compare For If Compare Call Call Call Call 
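A small sketch for the tf.math.erfinv entry above; the sample values are arbitrary and simply show that erf inverts the result.

import tensorflow as tf

x = tf.constant([-0.5, 0.0, 0.5], dtype=tf.float32)
y = tf.math.erfinv(x)
print(tf.math.erf(y))   # approximately [-0.5, 0.0, 0.5]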
Call FunctionDef name:_template arg:ctx arg:dummy arguments arg arg Call Try Call Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_log_modified_bessel_fn", + "source_code": "def _log_modified_bessel_fn(x, order=0):\n assert order == 0 or order == 1\n y = x / 3.75\n y = y * y\n small = _eval_poly(y, _COEF_SMALL[order])\n if order == 1:\n small = x.abs() * small\n small = small.log()\n y = 3.75 / x\n large = x - 0.5 * x.log() + _eval_poly(y, _COEF_LARGE[order]).log()\n result = torch.where(x < 3.75, small, large)\n return result", + "docstring": "Returns `order` is either 0 or 1.", + "type": "function", + "file_path": "pytorch\\torch\\distributions\\von_mises.py", + "ast_data": "FunctionDef name:_log_modified_bessel_fn arg:x arg:order arguments arg arg BoolOp Compare Compare Assign Assign Assign Call If Compare Assign Call Assign Call Assign Assign Call Call Call Assign Call Compare Return return:yes" + }, + { + "library": "scrapy", + "name": "UserAgentMiddleware", + "source_code": "class UserAgentMiddleware:\n\n def __init__(self, user_agent: str='Scrapy'):\n self.user_agent = user_agent\n\n @classmethod\n def from_crawler(cls, crawler: Crawler) -> Self:\n o = cls(crawler.settings['USER_AGENT'])\n crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)\n return o\n\n def spider_opened(self, spider: Spider) -> None:\n self.user_agent = getattr(spider, 'user_agent', self.user_agent)\n\n def process_request(self, request: Request, spider: Spider) -> Request | Response | None:\n if self.user_agent:\n request.headers.setdefault(b'User-Agent', self.user_agent)\n return None", + "docstring": "This middleware allows spiders to override the user_agent", + "type": "class", + "file_path": "scrapy\\scrapy\\downloadermiddlewares\\useragent.py", + "ast_data": "ClassDef name:UserAgentMiddleware FunctionDef name:__init__ arg:self arg:user_agent arguments arg arg Assign FunctionDef name:from_crawler arg:cls arg:crawler arguments arg arg Assign Call Call Return return:yes FunctionDef name:spider_opened arg:self arg:spider arguments arg arg Assign Call FunctionDef name:process_request arg:self arg:request arg:spider arguments arg arg arg If Call Return return:no" + }, + { + "library": "matplotlib", + "name": "set_figure", + "source_code": "def set_figure(self, fig):\n super().set_figure(fig)\n for c in self.get_children():\n c.set_figure(fig)", + "docstring": "Set the for the and all its children. 
Parameters ---------- fig :", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "FunctionDef name:set_figure arg:self arg:fig arguments arg arg Call Call For Call Call" + }, + { + "library": "numpy", + "name": "fullapi_hash", + "source_code": "def fullapi_hash(api_dicts):\n a = []\n for d in api_dicts:\n d = d.copy()\n d.pop('__unused_indices__', None)\n for name, data in order_dict(d):\n a.extend(name)\n a.extend(','.join(map(str, data)))\n return hashlib.md5(''.join(a).encode('ascii'), usedforsecurity=False).hexdigest()", + "docstring": "Given a list of api dicts defining the numpy C API, compute a checksum of the list of items in the API (as a string).", + "type": "function", + "file_path": "numpy\\numpy\\_core\\code_generators\\genapi.py", + "ast_data": "FunctionDef name:fullapi_hash arg:api_dicts arguments arg Assign For Assign Call Call For Call Call Call Call Call Return return:yes Call Call Call Call" + }, + { + "library": "kornia", + "name": "make_2tuple", + "source_code": "def make_2tuple(x):\n if isinstance(x, tuple):\n KORNIA_CHECK(len(x) == 2)\n return x\n KORNIA_CHECK(isinstance(x, int))\n return (x, x)", + "docstring": "Make a tuple of length 2.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\layers\\patch_embed.py", + "ast_data": "FunctionDef name:make_2tuple arg:x arguments arg If Call Call Compare Call Return return:yes Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "_update_settings", + "source_code": "def _update_settings(self, config: Config) -> None:\n self.settings['input_encoding'] = config.source_encoding\n self.settings['trim_footnote_reference_space'] = config.trim_footnote_reference_space\n self.settings['language_code'] = config.language\n self.settings.setdefault('smart_quotes', True)", + "docstring": "Update settings by new config.", + "type": "method", + "file_path": "sphinx\\sphinx\\environment\\__init__.py", + "ast_data": "FunctionDef name:_update_settings arg:self arg:config arguments arg arg Assign Assign Assign Call" + }, + { + "library": "tensorflow", + "name": "IsTrainable", + "source_code": "def IsTrainable(tensor_or_dtype):\n if tensor_util.is_tf_type(tensor_or_dtype):\n dtype = _DTypeFromTensor(tensor_or_dtype)\n else:\n dtype = tensor_or_dtype\n dtype = dtypes.as_dtype(dtype)\n trainable_dtypes = [dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128, dtypes.resource, dtypes.variant, dtypes.bfloat16]\n if flags.config().enable_quantized_dtypes_training.value():\n trainable_dtypes.extend([dtypes.qint8, dtypes.qint16, dtypes.qint32, dtypes.quint8, dtypes.quint16])\n return dtype.base_dtype in trainable_dtypes", + "docstring": "Determines whether a tensor or dtype supports infinitesimal changes.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop_util.py", + "ast_data": "FunctionDef name:IsTrainable arg:tensor_or_dtype arguments arg If Call Assign Call Assign Assign Call Assign If Call Call Call Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "_import_module", + "source_code": "@functools.cache\ndef _import_module(name: str) -> types.ModuleType:\n return importlib.import_module(name)", + "docstring": "Import the named module and cache the result. 
importlib.import_module() seems to do some filesystem checking to validate the name so not caching this can be slow.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py", + "ast_data": "FunctionDef name:_import_module arg:name arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "mask_hash", + "source_code": "def mask_hash(hash, show=6, char='*'):\n masked = hash[:show]\n masked += char * len(hash[show:])\n return masked", + "docstring": "Return the given hash, with only the first `` for security reasons.", + "type": "function", + "file_path": "django\\django\\contrib\\auth\\hashers.py", + "ast_data": "FunctionDef name:mask_hash arg:hash arg:show arg:char arguments arg arg arg Assign Call Return return:yes" + }, + { + "library": "pygame", + "name": "pixels_green", + "source_code": "def pixels_green(surface):\n return numpy.array(surface.get_view('G'), copy=False)", + "docstring": "pygame.surfarray.pixels_green(Surface): return array Reference pixel green into a 2d array. Create a new 2D array that directly references the green values in a Surface. Any changes to the array will affect the pixels in the Surface. This is a fast operation since no data is copied. This can only work on 24-bit or 32-bit Surfaces. The Surface this array references will remain locked for the lifetime of the array.", + "type": "function", + "file_path": "pygame\\src_py\\surfarray.py", + "ast_data": "FunctionDef name:pixels_green arg:surface arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "to_sparse_coo", + "source_code": "def to_sparse_coo(self):\n return self.to_sparse()", + "docstring": "Convert a tensor to :ref:. Examples:: >>> dense = torch.randn(5, 5) >>> sparse = dense.to_sparse_coo() >>> sparse._nnz() 25", + "type": "method", + "file_path": "pytorch\\torch\\_tensor.py", + "ast_data": "FunctionDef name:to_sparse_coo arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_i_will_not_complain_if_bc_breaks_InstructionTranslator", + "source_code": "def _i_will_not_complain_if_bc_breaks_InstructionTranslator(self):\n return self.__tx", + "docstring": "Returns the internal data structure InstructionTranslator that Dynamo uses to track state of symbolic evaluation. 
There are no BC guarantees on this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if you rely on it.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\comptime.py", + "ast_data": "FunctionDef name:_i_will_not_complain_if_bc_breaks_InstructionTranslator arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_get_estimator", + "source_code": "def _get_estimator(self):\n if self.estimator is None:\n return DecisionTreeRegressor()\n return self.estimator", + "docstring": "Resolve which estimator to return (default is DecisionTreeClassifier)", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py", + "ast_data": "FunctionDef name:_get_estimator arg:self arguments arg If Compare Return return:yes Call Return return:yes" + }, + { + "library": "numpy", + "name": "order", + "source_code": "@property\ndef order(self):\n return len(self._coeffs) - 1", + "docstring": "The order or degree of the polynomial", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_polynomial_impl.py", + "ast_data": "FunctionDef name:order arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_verbose_printer", + "source_code": "def _verbose_printer(verbose: bool | None) -> Callable[..., None]:\n if verbose is False:\n return lambda *_, **__: None\n return lambda *args, **kwargs: print('[torch.onnx]', *args, **kwargs)", + "docstring": "Prints messages based on .", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_capture_strategies.py", + "ast_data": "FunctionDef name:_verbose_printer arg:verbose arguments arg If Compare Return return:yes arguments arg arg Return return:yes arguments arg arg Call" + }, + { + "library": "pytorch", + "name": "zero_step", + "source_code": "def zero_step(fut: torch.futures.Future) -> torch.Tensor:\n overlap_info = zero._overlap_info\n bucket_index = bucket.index()\n rank = zero.global_rank\n assigned_ranks = overlap_info.assigned_ranks_per_bucket[bucket_index]\n overlap_info.bucket_indices_seen.append(bucket_index)\n if rank in assigned_ranks:\n _perform_local_step(bucket, zero, rank)\n _broadcast_bucket(bucket_index, zero)\n num_buckets = len(overlap_info.params_per_bucket)\n if len(overlap_info.bucket_indices_seen) == num_buckets:\n overlap_info.wait_for_broadcasts()\n overlap_info.clear_per_iter_info()\n return bucket.buffer()", + "docstring": "Perform partial :class: :meth: using gradients in the :class:. Returns: A :class: representing the contents of the gradient bucket.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\ddp_zero_hook.py", + "ast_data": "FunctionDef name:zero_step arg:fut arguments arg Assign Assign Call Assign Assign Call If Compare Call Call Assign Call If Compare Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "num_shards_map", + "source_code": "@property\ndef num_shards_map(self) -> list[int]:\n r = [1] * self.ndim\n for i, placement in enumerate(self.placements):\n if placement.is_shard():\n shard_dim = cast(Shard, placement).dim\n r[shard_dim] *= self.mesh.size(i)\n return r", + "docstring": "dim_map is a property we derive from of the distributed tensor. Unlike , denotes how many shards each tensor dim has. 
Like : len(num_shards_map) == dist_tensor.ndim num_shards_map[i] = 1: means tensor dim i is not sharded num_shards_map[i] = j: means tensor dim i has j shards in total For example, we have a dist tensor of shape [18, 20, 30], a device_mesh ([[0, 1, 2, 3], [4, 5, 6, 7]]), and placements ([Shard(1), Shard(0)]), the num_shards_map of this distributed tensor would be: [4, 2, 1].", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_dtensor_spec.py", + "ast_data": "FunctionDef name:num_shards_map arg:self arguments arg Assign For Call If Call Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_rmin", + "source_code": "def get_rmin(self):\n return self.viewLim.ymin", + "docstring": "Returns ------- float The inner radial limit.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py", + "ast_data": "FunctionDef name:get_rmin arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "RandomUnstructured", + "source_code": "class RandomUnstructured(BasePruningMethod):\n PRUNING_TYPE = 'unstructured'\n\n def __init__(self, amount):\n _validate_pruning_amount_init(amount)\n self.amount = amount\n\n def compute_mask(self, t, default_mask):\n tensor_size = t.nelement()\n nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)\n _validate_pruning_amount(nparams_toprune, tensor_size)\n mask = default_mask.clone(memory_format=torch.contiguous_format)\n if nparams_toprune != 0:\n prob = torch.rand_like(t)\n topk = torch.topk(prob.view(-1), k=nparams_toprune)\n mask.view(-1)[topk.indices] = 0\n return mask\n\n @classmethod\n def apply(cls, module, name, amount):\n return super().apply(module, name, amount=amount)", + "docstring": "Prune (currently unpruned) units in a tensor at random. Args: name (str): parameter name within ``, it represents the absolute number of parameters to prune.", + "type": "class", + "file_path": "pytorch\\torch\\nn\\utils\\prune.py", + "ast_data": "ClassDef name:RandomUnstructured Assign FunctionDef name:__init__ arg:self arg:amount arguments arg arg Call Assign FunctionDef name:compute_mask arg:self arg:t arg:default_mask arguments arg arg arg Assign Call Assign Call Call Assign Call If Compare Assign Call Assign Call Call Assign Call Return return:yes FunctionDef name:apply arg:cls arg:module arg:name arg:amount arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "set_width_ratios", + "source_code": "def set_width_ratios(self, width_ratios):\n if width_ratios is None:\n width_ratios = [1] * self._ncols\n elif len(width_ratios) != self._ncols:\n raise ValueError('Expected the given number of width ratios to match the number of columns of the grid')\n self._col_width_ratios = width_ratios", + "docstring": "Set the relative widths of the columns. *width_ratios* must be of length *ncols*. 
Each column gets a relative width of ``.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py", + "ast_data": "FunctionDef name:set_width_ratios arg:self arg:width_ratios arguments arg arg If Compare Assign If Compare Call Raise Call Assign" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, sizes, **kwargs):\n super().__init__(**kwargs)\n self.set_sizes(sizes)\n self.set_transform(transforms.IdentityTransform())\n self._paths = [mpath.Path.unit_circle()]", + "docstring": "Parameters ---------- sizes : float or array-like The area of each circle in points^2. **kwargs Forwarded to .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:sizes arguments arg arg arg Call Call Call Call Call Assign Call" + }, + { + "library": "tensorflow", + "name": "get_collection", + "source_code": "@tf_export(v1=['get_collection'])\ndef get_collection(key, scope=None) -> list[Any]:\n return get_default_graph().get_collection(key, scope)", + "docstring": "Wrapper for using the default graph. See for more details. Args: key: The key for the collection. For example, the class contains many standard names for collections. scope: (Optional.) If supplied, the resulting list is filtered to include only items whose attribute matches using . Items without a attribute are never returned if a scope is supplied and the choice or means that a without special tokens filters by prefix. Returns: The list of values in the collection with the given , or an empty list if no value has been added to that collection. The list contains the values in the order under which they were collected. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:get_collection arg:key arg:scope arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "cherrypy", + "name": "LockChecker", + "source_code": "class LockChecker(object):\n\n def __init__(self, session_id, timeout):\n self.session_id = session_id\n if timeout:\n self.timer = Timer.after(timeout)\n else:\n self.timer = NeverExpires()\n\n def expired(self):\n if self.timer.expired():\n raise LockTimeout('Timeout acquiring lock for %(session_id)s' % vars(self))\n return False", + "docstring": "Keep track of the time and detect if a timeout has expired.", + "type": "class", + "file_path": "cherrypy\\cherrypy\\lib\\locking.py", + "ast_data": "ClassDef name:LockChecker FunctionDef name:__init__ arg:self arg:session_id arg:timeout arguments arg arg arg Assign If Assign Call Assign Call FunctionDef name:expired arg:self arguments arg If Call Raise Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "shift", + "source_code": "def shift(self, periods: int=1, fill_value: object=None) -> ExtensionArray:\n if not len(self) or periods == 0:\n return self.copy()\n if isna(fill_value):\n fill_value = self.dtype.na_value\n empty = self._from_sequence([fill_value] * min(abs(periods), len(self)), dtype=self.dtype)\n if periods > 0:\n a = empty\n b = self[:-periods]\n else:\n a = self[abs(periods):]\n b = empty\n return self._concat_same_type([a, b])", + "docstring": "Shift values by desired number. Newly introduced missing values are filled with ``. For 2-dimensional ExtensionArrays, we are always shifting along axis=0. 
Examples -------- >>> arr = pd.array([1, 2, 3]) >>> arr.shift(2) [, , 1] Length: 3, dtype: Int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:shift arg:self arg:periods arg:fill_value arguments arg arg arg If BoolOp Call Compare Return return:yes Call If Call Assign Assign Call Call Call Call If Compare Assign Assign Assign Call Assign Return return:yes Call" + }, + { + "library": "django", + "name": "__len__", + "source_code": "def __len__(self):\n return self.num_fields", + "docstring": "Return the count of fields in this feature.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py", + "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "make_functional_with_buffers", + "source_code": "def make_functional_with_buffers(model: nn.Module, disable_autograd_tracking: bool=False) -> tuple[FunctionalModuleWithBuffers, tuple[Tensor, ...], tuple[Tensor, ...]]:\n return FunctionalModuleWithBuffers._create_from(model, disable_autograd_tracking=disable_autograd_tracking)", + "docstring": "make_functional_with_buffers(model, disable_autograd_tracking=False) -> func, params, buffers Given a `` to avoid unnecessarily tracking history with PyTorch autograd.", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\make_functional.py", + "ast_data": "FunctionDef name:make_functional_with_buffers arg:model arg:disable_autograd_tracking arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "_gaussian_kernel1d", + "source_code": "def _gaussian_kernel1d(sigma, order, radius):\n if order < 0:\n raise ValueError('order must be non-negative')\n exponent_range = np.arange(order + 1)\n sigma2 = sigma * sigma\n x = np.arange(-radius, radius + 1)\n phi_x = np.exp(-0.5 / sigma2 * x ** 2)\n phi_x = phi_x / phi_x.sum()\n if order == 0:\n return phi_x\n else:\n q = np.zeros(order + 1)\n q[0] = 1\n D = np.diag(exponent_range[1:], 1)\n P = np.diag(np.ones(order) / -sigma2, -1)\n Q_deriv = D + P\n for _ in range(order):\n q = Q_deriv.dot(q)\n q = (x[:, None] ** exponent_range).dot(q)\n return q * phi_x", + "docstring": "Computes a 1-D Gaussian convolution kernel.", + "type": "function", + "file_path": "scipy\\scipy\\ndimage\\_filters.py", + "ast_data": "FunctionDef name:_gaussian_kernel1d arg:sigma arg:order arg:radius arguments arg arg arg If Compare Raise Call Assign Call Assign Assign Call Assign Call Assign Call If Compare Return return:yes Assign Call Assign Assign Call Assign Call Call Assign For Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "read_query", + "source_code": "def read_query(self, sql: str, index_col: str | list[str] | None=None, coerce_float: bool=True, parse_dates=None, params=None, chunksize: int | None=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame | Iterator[DataFrame]:\n if coerce_float is not True:\n raise NotImplementedError(\"'coerce_float' is not implemented for ADBC drivers\")\n if params:\n raise NotImplementedError(\"'params' is not implemented for ADBC drivers\")\n if chunksize:\n raise NotImplementedError(\"'chunksize' is not implemented for ADBC drivers\")\n with self.execute(sql) as cur:\n pa_table = cur.fetch_arrow_table()\n df = arrow_table_to_pandas(pa_table, dtype_backend=dtype_backend)\n return _wrap_result_adbc(df, index_col=index_col, parse_dates=parse_dates, dtype=dtype)", + 
"docstring": "Read SQL query into a DataFrame. Parameters ---------- sql : str SQL query to be executed. index_col : string, optional, default: None Column name to use as index for the returned DataFrame object. coerce_float : bool, default True Raises NotImplementedError params : list, tuple or dict, optional, default: None Raises NotImplementedError parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of `pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. chunksize : int, default None Raises NotImplementedError dtype : Type name or dict of columns Data type for data or columns. E.g. np.float64 or {'a': np.float64, 'b': np.int32, 'c': 'Int64'} .. versionadded:: 1.3.0 Returns ------- DataFrame See Also -------- read_sql_table : Read SQL database table into a DataFrame. read_sql", + "type": "method", + "file_path": "pandas\\pandas\\io\\sql.py", + "ast_data": "FunctionDef name:read_query arg:self arg:sql arg:index_col arg:coerce_float arg:parse_dates arg:params arg:chunksize arg:dtype arg:dtype_backend arguments arg arg arg arg arg arg arg arg arg If Compare Raise Call If Raise Call If Raise Call With Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, y):\n check_is_fitted(self)\n class_to_index = self._build_cache()\n yt = self._transform(y, class_to_index)\n if not self.sparse_output:\n yt = yt.toarray()\n return yt", + "docstring": "Transform the given label sets. Parameters ---------- y : iterable of iterables A set of labels (any orderable and hashable object) for each sample. If the parameter is set, will not be iterated. Returns ------- y_indicator : array or CSR matrix, shape (n_samples, n_classes) A matrix such that iff is in , and 0 otherwise.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py", + "ast_data": "FunctionDef name:transform arg:self arg:y arguments arg arg Call Assign Call Assign Call If Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, graph_a: _C.Graph, graph_b: _C.Graph):\n self.graph_a = graph_a\n self.graph_b = graph_b", + "docstring": "Construct a _GraphDiff object. Args: graph_a (_C.Graph): First graph to compare. graph_b (_C.Graph): Second graph to compare.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\verification.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:graph_a arg:graph_b arguments arg arg arg Assign Assign" + }, + { + "library": "kornia", + "name": "vgg19_bn", + "source_code": "def vgg19_bn(*, weights: Optional[Any]=None, **kwargs: Any) -> VGG:\n return _vgg('E', True, weights, **kwargs)", + "docstring": "VGG-19_BN from __. Args: weights (:class:, optional): The pretrained weights to use. See :class: below for more details, and possible values. By default, no pre-trained weights are used. progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True. **kwargs: parameters passed to the `source code `_ for more details about this class. .. 
autoclass:: torchvision.models.VGG19_BN_Weights :members:", + "type": "function", + "file_path": "kornia\\kornia\\feature\\dedode\\vgg.py", + "ast_data": "FunctionDef name:vgg19_bn arguments arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_is_boolean", + "source_code": "@property\ndef _is_boolean(self) -> bool:\n return False", + "docstring": "Whether this dtype should be considered boolean. By default, ExtensionDtypes are assumed to be non-numeric. Setting this to True will affect the behavior of several places, e.g. * is_bool * boolean indexing Returns ------- bool", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\base.py", + "ast_data": "FunctionDef name:_is_boolean arg:self arguments arg Return return:yes" + }, + { + "library": "authlib", + "name": "get_token_credential", + "source_code": "def get_token_credential(self, request):\n raise NotImplementedError()", + "docstring": "Fetch the token credential from data store like a database, framework should implement this function. :param request: OAuth1Request instance :return: Token model instance", + "type": "method", + "file_path": "authlib\\authlib\\oauth1\\rfc5849\\resource_protector.py", + "ast_data": "FunctionDef name:get_token_credential arg:self arg:request arguments arg arg Raise Call" + }, + { + "library": "cherrypy", + "name": "__call__", + "source_code": "def __call__(self, path_info):\n request = cherrypy.serving.request\n resource, vpath = self.find_handler(path_info)\n if resource:\n avail = [m for m in dir(resource) if m.isupper()]\n if 'GET' in avail and 'HEAD' not in avail:\n avail.append('HEAD')\n avail.sort()\n cherrypy.serving.response.headers['Allow'] = ', '.join(avail)\n meth = request.method.upper()\n func = getattr(resource, meth, None)\n if func is None and meth == 'HEAD':\n func = getattr(resource, 'GET', None)\n if func:\n if hasattr(func, '_cp_config'):\n request.config.update(func._cp_config)\n vpath = [x.replace('%2F', '/') for x in vpath]\n request.handler = LateParamPageHandler(func, *vpath)\n else:\n request.handler = cherrypy.HTTPError(405)\n else:\n request.handler = cherrypy.NotFound()", + "docstring": "Set handler and config for the current request.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpdispatch.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:path_info arguments arg arg Assign Assign Call If Assign Call Call If BoolOp Compare Compare Call Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Call If If Call Call Assign Call Assign Call Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "statically_known_multiple_of", + "source_code": "def statically_known_multiple_of(self, numerator: Expr, denominator: Union[Expr, int]) -> bool:\n if free_unbacked_symbols(numerator) or free_unbacked_symbols(denominator):\n return False\n expr = sympy.Eq(numerator % denominator, 0)\n return self.is_expr_static_and_true(expr)", + "docstring": "Return a bool indicating if it is sound to optimize for the numerator being a multiple of the denominator.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\sizevars.py", + "ast_data": "FunctionDef name:statically_known_multiple_of arg:self arg:numerator arg:denominator arguments arg arg arg If BoolOp Call Call Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_Upgrade2To3", + "source_code": "def _Upgrade2To3(self, data):\n buffers = [{'data': []}]\n for subgraph in data['subgraphs']:\n if 'tensors' 
not in subgraph:\n continue\n for tensor in subgraph['tensors']:\n if 'data_buffer' not in tensor:\n tensor['buffer'] = 0\n else:\n if tensor['data_buffer']:\n tensor[u'buffer'] = len(buffers)\n buffers.append({'data': tensor['data_buffer']})\n else:\n tensor['buffer'] = 0\n del tensor['data_buffer']\n data['buffers'] = buffers", + "docstring": "Upgrade data from Version 2 to Version 3. Changed actual read-only tensor data to be in a buffers table instead of inline with the tensor. Args: data: Dictionary representing the TensorFlow lite data to be upgraded. This will be modified in-place to be an upgraded version.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\schema\\upgrade_schema.py", + "ast_data": "FunctionDef name:_Upgrade2To3 arg:self arg:data arguments arg arg Assign For If Compare For If Compare Assign If Assign Call Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, node_def, op, message, *args):\n super(OutOfRangeError, self).__init__(node_def, op, message, OUT_OF_RANGE, *args)", + "docstring": "Creates an .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call" + }, + { + "library": "django", + "name": "normalize_name", + "source_code": "def normalize_name(self, name):\n nn = self.quote_name(name)\n if nn[0] == '\"' and nn[-1] == '\"':\n nn = nn[1:-1]\n return nn", + "docstring": "Get the properly shortened and uppercased identifier as returned by quote_name() but without the quotes.", + "type": "method", + "file_path": "django\\django\\db\\backends\\oracle\\schema.py", + "ast_data": "FunctionDef name:normalize_name arg:self arg:name arguments arg arg Assign Call If BoolOp Compare Compare Assign Return return:yes" + }, + { + "library": "authlib", + "name": "register_compliance_hook", + "source_code": "def register_compliance_hook(self, hook_type, hook):\n if hook_type == 'protected_request':\n self.token_auth.hooks.add(hook)\n return\n if hook_type not in self.compliance_hook:\n raise ValueError('Hook type %s is not in %s.', hook_type, self.compliance_hook)\n self.compliance_hook[hook_type].add(hook)", + "docstring": "Register a hook for request/response tweaking. Available hooks are: * access_token_response: invoked before token parsing. * refresh_token_request: invoked before refreshing token. * refresh_token_response: invoked before refresh token parsing. * protected_request: invoked before making a request. * revoke_token_request: invoked before revoking a token. * introspect_token_request: invoked before introspecting a token.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\client.py", + "ast_data": "FunctionDef name:register_compliance_hook arg:self arg:hook_type arg:hook arguments arg arg arg If Compare Call Return return:no If Compare Raise Call Call" + }, + { + "library": "pytorch", + "name": "is_prerelease", + "source_code": "@property\ndef is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None", + "docstring": "Whether this version is a pre-release. 
>>> Version(\"1.2.3\").is_prerelease False >>> Version(\"1.2.3a1\").is_prerelease True >>> Version(\"1.2.3b1\").is_prerelease True >>> Version(\"1.2.3rc1\").is_prerelease True >>> Version(\"1.2.3dev1\").is_prerelease True", + "type": "method", + "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py", + "ast_data": "FunctionDef name:is_prerelease arg:self arguments arg Return return:yes BoolOp Compare Compare" + }, + { + "library": "pytorch", + "name": "register_pointwise", + "source_code": "def register_pointwise(aten_fn, name=None, broadcast=True, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, convert_input_to_bool=False, override_return_dtype=None, override_fn_when_input_bool=None, allow_alpha=False, triton_fallback=None):\n name = name or aten_fn.__name__\n fn = ops_wrapper(name)\n register_op_dtype_propagation_rules(name, type_promotion_kind, override_return_dtype)\n if override_fn_when_input_bool is not None:\n override_fn_when_input_bool = ops_wrapper(override_fn_when_input_bool)\n fn = make_pointwise(fn, override_return_dtype=override_return_dtype, override_fn_when_input_bool=override_fn_when_input_bool, allow_alpha=allow_alpha, triton_fallback=triton_fallback)\n fn = register_lowering(aten_fn, broadcast=broadcast, type_promotion_kind=type_promotion_kind, convert_input_to_bool=convert_input_to_bool)(fn)\n if hasattr(prims, name):\n register_lowering(getattr(prims, name), type_promotion_kind=None, convert_input_to_bool=convert_input_to_bool)(fn)\n return fn", + "docstring": "A pointwise function that maps ops.{name} to inputs", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\lowering.py", + "ast_data": "FunctionDef name:register_pointwise arg:aten_fn arg:name arg:broadcast arg:type_promotion_kind arg:convert_input_to_bool arg:override_return_dtype arg:override_fn_when_input_bool arg:allow_alpha arg:triton_fallback arguments arg arg arg arg arg arg arg arg arg Assign BoolOp Assign Call Call If Compare Assign Call Assign Call Assign Call Call If Call Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "_sym_ortho", + "source_code": "def _sym_ortho(a, b):\n if b == 0:\n return (np.sign(a), 0, abs(a))\n elif a == 0:\n return (0, np.sign(b), abs(b))\n elif abs(b) > abs(a):\n tau = a / b\n s = np.sign(b) / sqrt(1 + tau * tau)\n c = s * tau\n r = b / s\n else:\n tau = b / a\n c = np.sign(a) / sqrt(1 + tau * tau)\n s = c * tau\n r = a / c\n return (c, s, r)", + "docstring": "Stable implementation of Givens rotation. Notes ----- The routine 'SymOrtho' was added for numerical stability. This is recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of `` in some important places (see, for example text following \"Compute the next plane rotation Qk\" in minres.py). References ---------- .. [1] S.-C. 
Choi, \"Iterative Methods for Singular Linear Equations and Least-Squares Problems\", Dissertation,", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_isolve\\lsqr.py", + "ast_data": "FunctionDef name:_sym_ortho arg:a arg:b arguments arg arg If Compare Return return:yes Call Call If Compare Return return:yes Call Call If Compare Call Call Assign Assign Call Call Assign Assign Assign Assign Call Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "run_restore_ops", + "source_code": "def run_restore_ops(self, session=None):\n if context.executing_eagerly():\n return\n if session is None:\n session = get_session()\n with ops.device('/cpu:0'):\n saveables = self._gather_saveable_objects()\n v1_saver_lib.Saver(saveables).restore(sess=session, save_path=self._checkpoint.save_path)", + "docstring": "Load the name-based checkpoint using a new .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:run_restore_ops arg:self arg:session arguments arg arg If Call Return return:no If Compare Assign Call With Call Assign Call Call Call" + }, + { + "library": "django", + "name": "deconstructible", + "source_code": "def deconstructible(*args, path=None):\n\n def decorator(klass):\n\n def __new__(cls, *args, **kwargs):\n obj = super(klass, cls).__new__(cls)\n obj._constructor_args = (args, kwargs)\n return obj\n\n def deconstruct(obj):\n if path and type(obj) is klass:\n module_name, _, name = path.rpartition('.')\n else:\n module_name = obj.__module__\n name = obj.__class__.__name__\n module = import_module(module_name)\n if not hasattr(module, name):\n raise ValueError('Could not find object %s in %s.\\nPlease note that you cannot serialize things like inner classes. Please move the object into the main module body to use migrations.\\nFor more information, see https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values' % (name, module_name, get_docs_version()))\n return (path if path and type(obj) is klass else f'{obj.__class__.__module__}.{name}', obj._constructor_args[0], obj._constructor_args[1])\n klass.__new__ = staticmethod(__new__)\n klass.deconstruct = deconstruct\n return klass\n if not args:\n return decorator\n return decorator(*args)", + "docstring": "Class decorator that allows the decorated class to be serialized by the migrations subsystem. 
The kwarg specifies the import path.", + "type": "function", + "file_path": "django\\django\\utils\\deconstruct.py", + "ast_data": "FunctionDef name:deconstructible arguments arg arg FunctionDef name:decorator arg:klass arguments arg FunctionDef name:__new__ arg:cls arguments arg arg arg Assign Call Call Assign Return return:yes FunctionDef name:deconstruct arg:obj arguments arg If BoolOp Compare Call Assign Call Assign Assign Assign Call If Call Raise Call Call Return return:yes BoolOp Compare Call Assign Call Assign Return return:yes If Return return:yes Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_non_decade_format", + "source_code": "def _non_decade_format(self, sign_string, base, fx, usetex):\n return '$\\\\mathdefault{%s%s^{%.2f}}$' % (sign_string, base, fx)", + "docstring": "Return string for non-decade locations.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:_non_decade_format arg:self arg:sign_string arg:base arg:fx arg:usetex arguments arg arg arg arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_clip_gradients", + "source_code": "def _clip_gradients(self, grads):\n return grads", + "docstring": "Clip gradients according to the clipnorm and clipvalue attributes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v1.py", + "ast_data": "FunctionDef name:_clip_gradients arg:self arg:grads arguments arg arg Return return:yes" + }, + { + "library": "numpy", + "name": "_DomainTan", + "source_code": "class _DomainTan:\n\n def __init__(self, eps):\n self.eps = eps\n\n def __call__(self, x):\n with np.errstate(invalid='ignore'):\n return umath.less(umath.absolute(umath.cos(x)), self.eps)", + "docstring": "Define a valid interval for the function, so that: ``", + "type": "class", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "ClassDef name:_DomainTan FunctionDef name:__init__ arg:self arg:eps arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x arguments arg arg With Call Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "has_fit_parameter", + "source_code": "def has_fit_parameter(estimator, parameter):\n return hasattr(estimator, 'fit') and parameter in signature(estimator.fit).parameters", + "docstring": "Check whether the estimator's fit method supports the given parameter. Parameters ---------- estimator : object An estimator to inspect. parameter : str The searched parameter. Returns ------- is_parameter : bool Whether the parameter was found to be a named parameter of the estimator's fit method. 
Examples -------- >>> from sklearn.svm import SVC >>> from sklearn.utils.validation import has_fit_parameter >>> has_fit_parameter(SVC(), \"sample_weight\") True", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\validation.py", + "ast_data": "FunctionDef name:has_fit_parameter arg:estimator arg:parameter arguments arg arg Return return:yes BoolOp Call Compare Call" + }, + { + "library": "pandas", + "name": "_format_attrs", + "source_code": "def _format_attrs(self):\n attrs = super()._format_attrs()\n for attrib in self._attributes:\n if attrib == 'freq':\n freq = self.freqstr\n if freq is not None:\n freq = repr(freq)\n attrs.append(('freq', freq))\n return attrs", + "docstring": "Return a list of tuples of the (attr,formatted_value).", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\datetimelike.py", + "ast_data": "FunctionDef name:_format_attrs arg:self arguments arg Assign Call Call For If Compare Assign If Compare Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "twinx", + "source_code": "def twinx(self, axes_class=None):\n ax = self._add_twin_axes(axes_class, sharex=self)\n self.axis['right'].set_visible(False)\n ax.axis['right'].set_visible(True)\n ax.axis['left', 'top', 'bottom'].set_visible(False)\n return ax", + "docstring": "Create a twin of Axes with a shared x-axis but independent y-axis. The y-axis of self will have ticks on the left and the returned axes will have ticks on the right.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\parasite_axes.py", + "ast_data": "FunctionDef name:twinx arg:self arg:axes_class arguments arg arg Assign Call Call Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "_common_stride", + "source_code": "def _common_stride(offsets, counts, itemsize):\n if len(offsets) <= 1:\n return itemsize\n negative = offsets[1] < offsets[0]\n if negative:\n it = zip(reversed(offsets), reversed(counts))\n else:\n it = zip(offsets, counts)\n prev_offset = None\n stride = None\n for offset, count in it:\n if count != 1:\n if negative:\n return None\n if stride is None:\n stride = itemsize\n if stride != itemsize:\n return None\n end_offset = offset + (count - 1) * itemsize\n else:\n end_offset = offset\n if prev_offset is not None:\n new_stride = offset - prev_offset\n if stride is None:\n stride = new_stride\n if stride != new_stride:\n return None\n prev_offset = end_offset\n if negative:\n return -stride\n return stride", + "docstring": "Returns the stride between the fields, or None if the stride is not constant. The values in \"counts\" designate the lengths of subarrays. 
Subarrays are treated as many contiguous fields, with always positive stride.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\recfunctions.py", + "ast_data": "FunctionDef name:_common_stride arg:offsets arg:counts arg:itemsize arguments arg arg arg If Compare Call Return return:yes Assign Compare If Assign Call Call Call Assign Call Assign Assign For If Compare If Return return:no If Compare Assign If Compare Return return:no Assign Assign If Compare Assign If Compare Assign If Compare Return return:no Assign If Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "WorkOrder", + "source_code": "@dataclasses.dataclass(frozen=True)\nclass WorkOrder:\n label: Label\n autolabels: AutoLabels\n timer_args: WorkerTimerArgs\n source_cmd: Optional[str] = None\n timeout: Optional[float] = None\n retries: int = 0\n\n def __hash__(self) -> int:\n return id(self)\n\n def __str__(self) -> str:\n return json.dumps({'label': self.label, 'autolabels': self.autolabels.as_dict, 'num_threads': self.timer_args.num_threads})", + "docstring": "Spec to schedule work with the benchmark runner.", + "type": "class", + "file_path": "pytorch\\benchmarks\\instruction_counts\\execution\\work.py", + "ast_data": "ClassDef name:WorkOrder FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_should_act_as_resource_variable", + "source_code": "def _should_act_as_resource_variable(self):\n return True", + "docstring": "Pass resource_variable_ops.is_resource_variable check.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py", + "ast_data": "FunctionDef name:_should_act_as_resource_variable arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "KeyErrorMessage", + "source_code": "class KeyErrorMessage(str):\n __slots__ = ()\n\n def __repr__(self):\n return self", + "docstring": "str subclass that returns itself in repr", + "type": "class", + "file_path": "pytorch\\torch\\_utils.py", + "ast_data": "ClassDef name:KeyErrorMessage Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "create_equality_constraints_for_broadcasting", + "source_code": "def create_equality_constraints_for_broadcasting(e1: TVar, e2: TVar, e11: TVar, e12: TVar, d1: list[DVar], d2: list[DVar], d11: list[DVar], d12: list[DVar]):\n e1_tensor = BinConstraintT(e1, TensorType(d1), op_eq)\n e11_tensor = BinConstraintT(e11, TensorType(d11), op_eq)\n e2_tensor = BinConstraintT(e2, TensorType(d2), op_eq)\n e12_tensor = BinConstraintT(e12, TensorType(d12), op_eq)\n return [e1_tensor, e11_tensor, e2_tensor, e12_tensor]", + "docstring": "Create equality constraints for when no broadcasting occurs Args: e1: Input 1 e2: Input 2 e11: Broadcasted input 1 e12: Broadcasted input 2 d1: Variables that store dimensions for e1 d2: Variables that store dimensions for e2 d11: Variables that store dimensions for e11 d12: Variables that store dimensions for e22 Returns: Four equality constraints", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", + "ast_data": "FunctionDef name:create_equality_constraints_for_broadcasting arg:e1 arg:e2 arg:e11 arg:e12 arg:d1 arg:d2 arg:d11 arg:d12 arguments arg arg arg arg arg arg arg arg Assign Call Call Assign Call Call Assign Call Call Assign Call 
Call Return return:yes" + }, + { + "library": "scipy", + "name": "get_matrix", + "source_code": "def get_matrix(self):\n if self.approx_type == 'hess':\n M = np.copy(self.B)\n else:\n M = np.copy(self.H)\n li = np.tril_indices_from(M, k=-1)\n M[li] = M.T[li]\n return M", + "docstring": "Return the current internal matrix. Returns ------- M : ndarray, shape (n, n) Dense matrix containing either the Hessian or its inverse (depending on how was defined).", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py", + "ast_data": "FunctionDef name:get_matrix arg:self arguments arg If Compare Assign Call Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "sum_duplicates", + "source_code": "def sum_duplicates(self):\n if self.has_canonical_format:\n return\n self.sort_indices()\n M, N = self._swap(self._shape_as_2d)\n csr_sum_duplicates(M, N, self.indptr, self.indices, self.data)\n self.prune()\n self.has_canonical_format = True", + "docstring": "Eliminate duplicate entries by adding them together This is an *in place* operation.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_compressed.py", + "ast_data": "FunctionDef name:sum_duplicates arg:self arguments arg If Return return:no Call Assign Call Call Call Assign" + }, + { + "library": "pandas", + "name": "ExponentialMovingWindowGroupby", + "source_code": "class ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow):\n _attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes\n\n def __init__(self, obj, *args, _grouper=None, **kwargs) -> None:\n super().__init__(obj, *args, _grouper=_grouper, **kwargs)\n if not obj.empty and self.times is not None:\n groupby_order = np.concatenate(list(self._grouper.indices.values()))\n self._deltas = _calculate_deltas(self.times.take(groupby_order), self.halflife)\n\n def _get_window_indexer(self) -> GroupbyIndexer:\n window_indexer = GroupbyIndexer(groupby_indices=self._grouper.indices, window_indexer=ExponentialMovingWindowIndexer)\n return window_indexer", + "docstring": "Provide an exponential moving window groupby implementation.", + "type": "class", + "file_path": "pandas\\pandas\\core\\window\\ewm.py", + "ast_data": "ClassDef name:ExponentialMovingWindowGroupby Assign FunctionDef name:__init__ arg:self arg:obj arguments arg arg arg arg arg Call Call If BoolOp Compare Assign Call Call Call Assign Call Call FunctionDef name:_get_window_indexer arg:self arguments arg Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "shade_normals", + "source_code": "def shade_normals(self, normals, fraction=1.0):\n intensity = normals.dot(self.direction)\n imin, imax = (intensity.min(), intensity.max())\n intensity *= fraction\n if imax - imin > 1e-06:\n intensity -= imin\n intensity /= imax - imin\n intensity = np.clip(intensity, 0, 1)\n return intensity", + "docstring": "Calculate the illumination intensity for the normal vectors of a surface using the defined azimuth and elevation for the light source. Imagine an artificial sun placed at infinity in some azimuth and elevation position illuminating our surface. The parts of the surface that slope toward the sun should brighten while those sides facing away should become darker. Parameters ---------- fraction : number, optional Increases or decreases the contrast of the hillshade. 
Values greater than one will cause intermediate values to move closer to full illumination or shadow (and clipping any values that move beyond 0 or 1). Note that this is not visually or mathematically the same as vertical exaggeration. Returns ------- A 2D array of illumination values between 0-1, where 0 is completely in shadow and 1 is completely illuminated.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:shade_normals arg:self arg:normals arg:fraction arguments arg arg arg Assign Call Assign Call Call If Compare Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "hermval", + "source_code": "def hermval(x, c, tensor=True):\n c = np.array(c, ndmin=1, copy=None)\n if c.dtype.char in '?bBhHiIlLqQpP':\n c = c.astype(np.double)\n if isinstance(x, (tuple, list)):\n x = np.asarray(x)\n if isinstance(x, np.ndarray) and tensor:\n c = c.reshape(c.shape + (1,) * x.ndim)\n x2 = x * 2\n if len(c) == 1:\n c0 = c[0]\n c1 = 0\n elif len(c) == 2:\n c0 = c[0]\n c1 = c[1]\n else:\n nd = len(c)\n c0 = c[-2]\n c1 = c[-1]\n for i in range(3, len(c) + 1):\n tmp = c0\n nd = nd - 1\n c0 = c[-i] - c1 * (2 * (nd - 1))\n c1 = tmp + c1 * x2\n return c0 + c1 * x2", + "docstring": "Evaluate an Hermite series at points x. If is of length `xxccxctensortensortensorxxcccxcxxcc` is multidimensional. The default value is True. Returns ------- values : ndarray, algebra_like The shape of the return value is described above. See Also -------- hermval2d, hermgrid2d, hermval3d, hermgrid3d Notes ----- The evaluation uses Clenshaw recursion, aka synthetic division. Examples -------- >>> from numpy.polynomial.hermite import hermval >>> coef = [1,2,3] >>> hermval(1, coef) 11.0 >>> hermval([[1,2],[3,4]], coef) array([[ 11., 51.], [115., 203.]])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite.py", + "ast_data": "FunctionDef name:hermval arg:x arg:c arg:tensor arguments arg arg arg Assign Call If Compare Assign Call If Call Assign Call If BoolOp Call Assign Call Assign If Compare Call Assign Assign If Compare Call Assign Assign Assign Call Assign Assign For Call Call Assign Assign Assign Assign Return return:yes" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self):\n self.another = AnotherPage()", + "docstring": "Mount another page into the home page app.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\tutorial\\tut05_derived_objects.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Call" + }, + { + "library": "pytorch", + "name": "convert_cmake_value_to_python_value", + "source_code": "def convert_cmake_value_to_python_value(cmake_value: str, cmake_type: str) -> CMakeValue:\n cmake_type = cmake_type.upper()\n up_val = cmake_value.upper()\n if cmake_type == 'BOOL':\n return not (up_val in ('FALSE', 'OFF', 'N', 'NO', '0', '', 'NOTFOUND') or up_val.endswith('-NOTFOUND'))\n elif cmake_type == 'FILEPATH':\n if up_val.endswith('-NOTFOUND'):\n return None\n else:\n return cmake_value\n else:\n return cmake_value", + "docstring": "Convert a CMake value in a string form to a Python value. Args: cmake_value (string): The CMake value in a string form (e.g., \"ON\", \"OFF\", \"1\"). cmake_type (string): The CMake type of :attr:. 
Returns: A Python value corresponding to :attr: with type :attr:.", + "type": "function", + "file_path": "pytorch\\tools\\setup_helpers\\cmake_utils.py", + "ast_data": "FunctionDef name:convert_cmake_value_to_python_value arg:cmake_value arg:cmake_type arguments arg arg Assign Call Assign Call If Compare Return return:yes BoolOp Compare Call If Compare If Call Return return:no Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_array_indexing", + "source_code": "def _array_indexing(array, key, key_dtype, axis):\n xp, is_array_api = get_namespace(array)\n if is_array_api:\n return xp.take(array, key, axis=axis)\n if issparse(array) and key_dtype == 'bool':\n key = np.asarray(key)\n if isinstance(key, tuple):\n key = list(key)\n return array[key, ...] if axis == 0 else array[:, key]", + "docstring": "Index an array or scipy.sparse consistently across NumPy version.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_indexing.py", + "ast_data": "FunctionDef name:_array_indexing arg:array arg:key arg:key_dtype arg:axis arguments arg arg arg arg Assign Call If Return return:yes Call If BoolOp Call Compare Assign Call If Call Assign Call Return return:yes Compare" + }, + { + "library": "numpy", + "name": "apply_over_axes", + "source_code": "def apply_over_axes(func, a, axes):\n val = asarray(a)\n N = a.ndim\n if array(axes).ndim == 0:\n axes = (axes,)\n for axis in axes:\n if axis < 0:\n axis = N + axis\n args = (val, axis)\n res = func(*args)\n if res.ndim == val.ndim:\n val = res\n else:\n res = ma.expand_dims(res, axis)\n if res.ndim == val.ndim:\n val = res\n else:\n raise ValueError('function is not returning an array of the correct shape')\n return val", + "docstring": "(This docstring will be overwritten)", + "type": "function", + "file_path": "numpy\\numpy\\ma\\extras.py", + "ast_data": "FunctionDef name:apply_over_axes arg:func arg:a arg:axes arguments arg arg arg Assign Call Assign If Compare Call Assign For If Compare Assign Assign Assign Call If Compare Assign Assign Call If Compare Assign Raise Call Return return:yes" + }, + { + "library": "kornia", + "name": "cx_left", + "source_code": "@property\ndef cx_left(self) -> Tensor:\n return self.rectified_left_camera[..., 0, 2]", + "docstring": "Return the x-coordinate of the principal point for the left camera. 
Returns: tensor of shape :math:", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py", + "ast_data": "FunctionDef name:cx_left arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "_get_interval", + "source_code": "def _get_interval(self):\n return 1", + "docstring": "Return the number of units for each tick.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\dates.py", + "ast_data": "FunctionDef name:_get_interval arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "RootMeanSquaredError", + "source_code": "class RootMeanSquaredError(Mean):\n\n def __init__(self, name='root_mean_squared_error', dtype=None):\n super(RootMeanSquaredError, self).__init__(name, dtype=dtype)\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n y_true = math_ops.cast(y_true, self._dtype)\n y_pred = math_ops.cast(y_pred, self._dtype)\n y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)\n error_sq = math_ops.squared_difference(y_pred, y_true)\n return super(RootMeanSquaredError, self).update_state(error_sq, sample_weight=sample_weight)\n\n def result(self):\n return math_ops.sqrt(math_ops.div_no_nan(self.total, self.count))", + "docstring": "Computes root mean squared error metric between and . Standalone usage: >>> m = tf.keras.metrics.RootMeanSquaredError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.5 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.70710677 Usage with API:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "ClassDef name:RootMeanSquaredError FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call FunctionDef name:update_state arg:self arg:y_true arg:y_pred arg:sample_weight arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call FunctionDef name:result arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "get_input_shape_at", + "source_code": "def get_input_shape_at(self, node_index):\n return self._get_node_attribute_at_index(node_index, 'input_shapes', 'input shape')", + "docstring": "Retrieves the input shape(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first time the layer was called. Returns: A shape tuple (or list of shape tuples if the layer has multiple inputs). Raises: RuntimeError: If called in Eager mode.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:get_input_shape_at arg:self arg:node_index arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "tpu_core_locations_to_ids", + "source_code": "def tpu_core_locations_to_ids(self, tpu_core_locations):\n return _pywrap_dtensor_device.TPUCoreLocationsToIDs(context.context()._handle, self._device_info, tpu_core_locations)", + "docstring": "Translates TPU core locations to TPU core IDs. Args: tpu_core_locations: A list of TPU core locations. Each one is a list of four unsigned integers, [x, y, z, core]. 
Returns: A list of corresponding TPU core IDs.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py", + "ast_data": "FunctionDef name:tpu_core_locations_to_ids arg:self arg:tpu_core_locations arguments arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "fprime2", + "source_code": "def fprime2(self, x, *args):\n if self.vals is None or x != self.x:\n self(x, *args)\n return self.vals[2]", + "docstring": "Calculate f'' or use a cached value if available", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_root_scalar.py", + "ast_data": "FunctionDef name:fprime2 arg:self arg:x arguments arg arg arg If BoolOp Compare Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "has_resource", + "source_code": "def has_resource(self, feature_column, resource_name):\n return resource_name in self._cols_to_resources_map[feature_column]", + "docstring": "Returns true iff a resource with same name exists. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A object this variable corresponds to. resource_name: Name of the resource.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:has_resource arg:self arg:feature_column arg:resource_name arguments arg arg arg Return return:yes Compare" + }, + { + "library": "cryptography", + "name": "__copy__", + "source_code": "@abc.abstractmethod\ndef __copy__(self) -> Ed448PrivateKey:\n pass", + "docstring": "Returns a copy.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed448.py", + "ast_data": "FunctionDef name:__copy__ arg:self arguments arg" + }, + { + "library": "kornia", + "name": "Brightness", + "source_code": "class Brightness(OperationBase):\n\n def __init__(self, initial_magnitude: Optional[float]=0.5, initial_probability: float=0.5, magnitude_range: Tuple[float, float]=(0.2, 1.8), temperature: float=0.1, symmetric_megnitude: bool=False) -> None:\n super().__init__(K.RandomBrightness(magnitude_range, same_on_batch=False, p=initial_probability), initial_magnitude=[('brightness_factor', initial_magnitude)], temperature=temperature, symmetric_megnitude=symmetric_megnitude)", + "docstring": "Apply brightness operation. Args: initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. initial_magnitude: the initial magnitude. magnitude_range: the sampling range for random sampling and clamping the optimized magnitude. temperature: temperature for RelaxedBernoulli distribution used during training. symmetric_megnitude: if to randomly assign the magnitude as negative or not.", + "type": "class", + "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py", + "ast_data": "ClassDef name:Brightness FunctionDef name:__init__ arg:self arg:initial_magnitude arg:initial_probability arg:magnitude_range arg:temperature arg:symmetric_megnitude arguments arg arg arg arg arg arg Call Call Call" + }, + { + "library": "pytorch", + "name": "PerGroup", + "source_code": "@dataclass(frozen=True)\nclass PerGroup(Granularity):\n group_size: int", + "docstring": "Represents per-channel group granularity in quantization. This granularity type calculates different quantization parameters for each group of elements. 
For example if the input tensor is shape [8, 16], and the group size is 4, then the input tensor is reshaped to [64, 4] quantization parameters are calculated for each group of 4 elements, giving a total of 64 quantization parameters. Attributes: group_size (int): The size of each quantization group", + "type": "class", + "file_path": "pytorch\\torch\\ao\\quantization\\observer.py", + "ast_data": "ClassDef name:PerGroup Call" + }, + { + "library": "scikit-learn", + "name": "__sklearn_tags__", + "source_code": "def __sklearn_tags__(self):\n tags = super().__sklearn_tags__()\n tags.input_tags.pairwise = get_tags(self.estimator).input_tags.pairwise\n tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse\n return tags", + "docstring": "Indicate if wrapped estimator is using a precomputed Gram matrix", + "type": "method", + "file_path": "scikit-learn\\sklearn\\multiclass.py", + "ast_data": "FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "f2", + "source_code": "def f2(x, h, k):\n return -x + sc.xlog1py(1.0 / h - 1.0, -h * np.exp(-x))", + "docstring": "pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0) logpdf = ...", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", + "ast_data": "FunctionDef name:f2 arg:x arg:h arg:k arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "get_metadata_routing", + "source_code": "def get_metadata_routing(self):\n return get_routing_for_object(self._metadata_request)", + "docstring": "Get requested data properties. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.3 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py", + "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_ticks", + "source_code": "def get_ticks(self, minor=False):\n if minor:\n return self.long_axis.get_minorticklocs()\n else:\n return self.long_axis.get_majorticklocs()", + "docstring": "Return the ticks as a list of locations. 
Parameters ---------- minor : boolean, default: False if True return the minor ticks.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py", + "ast_data": "FunctionDef name:get_ticks arg:self arg:minor arguments arg arg If Return return:yes Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "color", + "source_code": "def color(value, user_arg, command, comm_arg):\n arg = user_arg if user_arg != '' else comm_arg\n if value[0] == '#' and len(value) == 7:\n return (command, f'[HTML]{{{value[1:].upper()}}}{arg}')\n if value[0] == '#' and len(value) == 4:\n val = f'{value[1].upper() * 2}{value[2].upper() * 2}{value[3].upper() * 2}'\n return (command, f'[HTML]{{{val}}}{arg}')\n elif value[:3] == 'rgb':\n r = re.findall('(?<=\\\\()[0-9\\\\s%]+(?=,)', value)[0].strip()\n r = float(r[:-1]) / 100 if '%' in r else int(r) / 255\n g = re.findall('(?<=,)[0-9\\\\s%]+(?=,)', value)[0].strip()\n g = float(g[:-1]) / 100 if '%' in g else int(g) / 255\n if value[3] == 'a':\n b = re.findall('(?<=,)[0-9\\\\s%]+(?=,)', value)[1].strip()\n else:\n b = re.findall('(?<=,)[0-9\\\\s%]+(?=\\\\))', value)[0].strip()\n b = float(b[:-1]) / 100 if '%' in b else int(b) / 255\n return (command, f'[rgb]{{{r:.3f}, {g:.3f}, {b:.3f}}}{arg}')\n else:\n return (command, f'{{{value}}}{arg}')", + "docstring": "CSS colors have 5 formats to process: - 6 digit hex code: \"#ff23ee\" --> [HTML]{FF23EE} - 3 digit hex code: \"#f0e\" --> [HTML]{FF00EE} - rgba: rgba(128, 255, 0, 0.5) --> [rgb]{0.502, 1.000, 0.000} - rgb: rgb(128, 255, 0,) --> [rbg]{0.502, 1.000, 0.000} - string: red --> {red} Additionally rgb or rgba can be expressed in % which is also parsed.", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\style_render.py", + "ast_data": "FunctionDef name:color arg:value arg:user_arg arg:command arg:comm_arg arguments arg arg arg arg Assign Compare If BoolOp Compare Compare Call Return return:yes Call If BoolOp Compare Compare Call Assign Call Call Call Return return:yes If Compare Assign Call Call Assign Compare Call Call Assign Call Call Assign Compare Call Call If Compare Assign Call Call Assign Call Call Assign Compare Call Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, byminute=None, interval=1, tz=None):\n if byminute is None:\n byminute = range(60)\n rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval, bysecond=0)\n super().__init__(rule, tz=tz)", + "docstring": "Parameters ---------- byminute : int or list of int, default: all minutes Ticks will be placed on every minute in *byminute*. 
Default is `~datetime.tzinfotimezonedateutil.tz`.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\dates.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:byminute arg:interval arg:tz arguments arg arg arg arg If Compare Assign Call Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "check_all_files", + "source_code": "def check_all_files():\n for file_name in RELEVANT_FILES:\n check_existence(file_name)", + "docstring": "Check all relevant files necessary for upgrade.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\update_version.py", + "ast_data": "FunctionDef name:check_all_files arguments For Call" + }, + { + "library": "scipy", + "name": "visit_fn", + "source_code": "def visit_fn(self, temperature, dim):\n x, y = self.rng_gen.normal(size=(dim, 2)).T\n factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))\n factor4 = self._factor4_p * factor1\n x *= np.exp(-(self._visiting_param - 1.0) * np.log(self._factor6 / factor4) / (3.0 - self._visiting_param))\n den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) / (3.0 - self._visiting_param))\n return x / den", + "docstring": "Formula Visita from p. 405 of reference [2]", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_dual_annealing.py", + "ast_data": "FunctionDef name:visit_fn arg:self arg:temperature arg:dim arguments arg arg arg Assign Call Assign Call Call Assign Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "matches", + "source_code": "@classmethod\ndef matches(cls, header):\n scheme, _, _ = header.partition(' ')\n return scheme.lower() == cls.scheme", + "docstring": "Check if header scheme matches auth implementation.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py", + "ast_data": "FunctionDef name:matches arg:cls arg:header arguments arg arg Assign Call Return return:yes Compare Call" + }, + { + "library": "scrapy", + "name": "TunnelError", + "source_code": "class TunnelError(Exception):\n pass", + "docstring": "An HTTP CONNECT tunnel could not be established by the proxy.", + "type": "class", + "file_path": "scrapy\\scrapy\\core\\downloader\\handlers\\http11.py", + "ast_data": "ClassDef name:TunnelError" + }, + { + "library": "sphinx", + "name": "get_enumerable_node_type", + "source_code": "def get_enumerable_node_type(self, node: Node) -> str | None:\n enum_node_type, _ = self.enumerable_nodes.get(node.__class__, (None, None))\n return enum_node_type", + "docstring": "Get type of enumerable nodes (experimental).", + "type": "method", + "file_path": "sphinx\\sphinx\\domains\\__init__.py", + "ast_data": "FunctionDef name:get_enumerable_node_type arg:self arg:node arguments arg arg Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "layer_uses_training_bool", + "source_code": "def layer_uses_training_bool(layer):\n if layer._expects_training_arg:\n return True\n visited = {layer}\n to_visit = list_all_layers(layer)\n while to_visit:\n layer = to_visit.pop()\n if layer in visited:\n continue\n if getattr(layer, '_expects_training_arg', True):\n return True\n visited.add(layer)\n to_visit.extend(list_all_layers(layer))\n return False", + "docstring": "Returns whether this layer or any of its children uses the training arg.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\utils.py", + "ast_data": "FunctionDef name:layer_uses_training_bool arg:layer arguments arg If Return 
return:yes Assign Assign Call While Assign Call If Compare If Call Return return:yes Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "seuclidean", + "source_code": "def seuclidean(u, v, V):\n u = _validate_vector(u)\n v = _validate_vector(v)\n V = _validate_vector(V, dtype=np.float64)\n if V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]:\n raise TypeError('V must be a 1-D array of the same dimension as u and v.')\n return euclidean(u, v, w=1 / V)", + "docstring": "Return the standardized Euclidean distance between two 1-D arrays. The standardized Euclidean distance between two n-vectors and is .. math:: \\sqrt{\\sum\\limits_i \\frac{1}{V_i} \\left(u_i-v_i \\right)^2} `Vuv`. Examples -------- >>> from scipy.spatial import distance >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [0.1, 0.1, 0.1]) 4.4721359549995796 >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [1, 0.1, 0.1]) 3.3166247903553998 >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [10, 0.1, 0.1]) 3.1780497164141406", + "type": "function", + "file_path": "scipy\\scipy\\spatial\\distance.py", + "ast_data": "FunctionDef name:seuclidean arg:u arg:v arg:V arguments arg arg arg Assign Call Assign Call Assign Call If BoolOp Compare Compare Raise Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_root_scalar_brenth_doc", + "source_code": "def _root_scalar_brenth_doc():\n pass", + "docstring": "Options ------- args : tuple, optional Extra arguments passed to the objective function. bracket: A sequence of 2 floats, optional An interval bracketing a root. `` must have different signs at the two endpoints. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. options: dict, optional Specifies any method-specific options not covered above.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_root_scalar.py", + "ast_data": "FunctionDef name:_root_scalar_brenth_doc arguments" + }, + { + "library": "tensorflow", + "name": "_ErrorMetadata", + "source_code": "class _ErrorMetadata(error_utils.ErrorMetadataBase):\n\n def create_exception(self, source_error):\n preferred_type = type(source_error)\n if issubclass(preferred_type, errors_impl.OpError):\n init_argspec = tf_inspect.getfullargspec(preferred_type.__init__)\n message = self.get_message()\n init_args = tuple(init_argspec.args)\n if init_args == ('self', 'node_def', 'op', 'message'):\n return preferred_type(source_error.node_def, source_error.op, message, source_error.experimental_payloads)\n elif preferred_type in (errors.PyCTError, AutoGraphError, ConversionError, StagingError, errors_impl.InaccessibleTensorError, errors_impl.OperatorNotAllowedInGraphError):\n return preferred_type(self.get_message())\n exc = super(_ErrorMetadata, self).create_exception(source_error)\n if exc is not None:\n return exc\n return StagingError(self.get_message())", + "docstring": "AutoGraph-specific error metadata. 
See base class.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py", + "ast_data": "ClassDef name:_ErrorMetadata FunctionDef name:create_exception arg:self arg:source_error arguments arg arg Assign Call If Call Assign Call Assign Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Call Assign Call Call If Compare Return return:yes Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "convert", + "source_code": "@_export_metrics\ndef convert(self):\n saved_model_convert_result = self._convert_as_saved_model()\n if saved_model_convert_result:\n return saved_model_convert_result\n graph_def, input_tensors, output_tensors, frozen_func = self._freeze_keras_model()\n graph_def = self._optimize_tf_model(graph_def, input_tensors, output_tensors, frozen_func)\n return super(TFLiteKerasModelConverterV2, self).convert(graph_def, input_tensors, output_tensors)", + "docstring": "Converts a keras model based on instance variables. Returns: The converted data in serialized format. Raises: ValueError: Multiple concrete functions are specified. Input shape is not specified. Invalid quantization parameters.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:convert arg:self arguments arg Assign Call If Return return:yes Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_fuse_allreduce", + "source_code": "def _fuse_allreduce(graph: fx.Graph, comm_blocks: list[CommBlock], node_indices: dict[fx.Node, int], use_concat: bool) -> CommBlock:\n if len(comm_blocks) == 1:\n return comm_blocks[0]\n last_input_node = comm_blocks[0].inputs[0]\n last_input_index = -1\n all_input_nodes = []\n for comm_block in comm_blocks:\n input_node = comm_block.inputs[0]\n all_input_nodes.append(input_node)\n index = node_indices[input_node]\n if index >= last_input_index:\n assert index != last_input_index\n last_input_node = input_node\n last_input_index = index\n if use_concat:\n fused_comm_block = _fuse_allreduce_by_concat(graph, last_input_node, all_input_nodes, comm_blocks[-1])\n else:\n fused_comm_block = _fuse_with_coalesced_op(graph, last_input_node, all_input_nodes, comm_blocks[-1])\n _scatter_fused_allreduce_waits(graph, fused_comm_block, comm_blocks, node_indices, split_and_reshape=use_concat)\n for comm_block in comm_blocks:\n for wait in comm_block.wait_nodes:\n graph.erase_node(wait)\n graph.erase_node(comm_block.comm_node)\n graph.eliminate_dead_code()\n return fused_comm_block", + "docstring": "Given a list of allreduce CommBlock, fuse the CommBlocks into one CommBlock.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\ddp_fusion.py", + "ast_data": "FunctionDef name:_fuse_allreduce arg:graph arg:comm_blocks arg:node_indices arg:use_concat arguments arg arg arg arg If Compare Call Return return:yes Assign Assign Assign For Assign Call Assign If Compare Compare Assign Assign If Assign Call Assign Call Call For For Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_mel_to_hertz", + "source_code": "def _mel_to_hertz(mel_values, name=None):\n with ops.name_scope(name, 'mel_to_hertz', [mel_values]):\n mel_values = ops.convert_to_tensor(mel_values)\n return _MEL_BREAK_FREQUENCY_HERTZ * (math_ops.exp(mel_values / _MEL_HIGH_FREQUENCY_Q) - 1.0)", + "docstring": "Converts frequencies in from the mel scale to linear scale. 
Args: mel_values: A of frequencies in the mel scale. name: An optional name for the operation. Returns: A of the same shape and type as containing linear scale frequencies in Hertz.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\mel_ops.py", + "ast_data": "FunctionDef name:_mel_to_hertz arg:mel_values arg:name arguments arg arg With Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "selu", + "source_code": "@register_decomposition(aten.selu)\n@_inplace_wrapper\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('a',), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef selu(a: TensorLikeType, inplace: bool=False) -> TensorLikeType:\n if inplace:\n raise NotImplementedError\n alpha = 1.6732632423543772\n scale = 1.0507009873554805\n rhs = alpha * torch.expm1(a)\n return scale * torch.where(a > 0, a, rhs)", + "docstring": "Reference implementation of torch.nn.functional.selu", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py", + "ast_data": "FunctionDef name:selu arg:a arg:inplace arguments arg arg If Raise Assign Assign Assign Call Return return:yes Call Compare Call Call Call" + }, + { + "library": "pytorch", + "name": "is_bf16_supported", + "source_code": "def is_bf16_supported(including_emulation: bool=True) -> bool:\n if not is_available():\n return False\n return including_emulation or torch.xpu.get_device_properties().has_bfloat16_conversions", + "docstring": "Return a bool indicating if the current XPU device supports dtype bfloat16.", + "type": "function", + "file_path": "pytorch\\torch\\xpu\\__init__.py", + "ast_data": "FunctionDef name:is_bf16_supported arg:including_emulation arguments arg If Call Return return:yes Return return:yes BoolOp Call" + }, + { + "library": "tensorflow", + "name": "insert", + "source_code": "def insert(self, keys, values, name=None):\n return self.insert_or_assign(keys, values, name)", + "docstring": "Associates with . Args: keys: Keys to insert. Can be a tensor of any shape. Must match the table's key type. values: Values to be associated with keys. Must be a tensor of the same shape as and match the table's value type. name: A name for the operation (optional). Returns: The created Operation. Raises: TypeError: when or doesn't match the table data types.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:insert arg:self arg:keys arg:values arg:name arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_TransformedBoundsLocator", + "source_code": "class _TransformedBoundsLocator:\n\n def __init__(self, bounds, transform):\n self._bounds = bounds\n self._transform = transform\n\n def __call__(self, ax, renderer):\n return mtransforms.TransformedBbox(mtransforms.Bbox.from_bounds(*self._bounds), self._transform - ax.get_figure(root=False).transSubfigure)", + "docstring": "Axes locator for and similarly positioned Axes. 
The locator is a callable object used in to compute the Axes location depending on the renderer.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "ClassDef name:_TransformedBoundsLocator FunctionDef name:__init__ arg:self arg:bounds arg:transform arguments arg arg arg Assign Assign FunctionDef name:__call__ arg:self arg:ax arg:renderer arguments arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "get_dataset", + "source_code": "@abc.abstractmethod\ndef get_dataset(self):\n raise NotImplementedError", + "docstring": "Get a dataset instance for the current DataAdapter. Note that the dataset returned does not repeat for epoch, so caller might need to create new iterator for the same dataset at the beginning of the epoch. This behavior might change in future. Returns: An tf.dataset.Dataset. Caller might use the dataset in different context, eg iter(dataset) in eager to get the value directly, or in graph mode, provide the iterator tensor to Keras model function.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", + "ast_data": "FunctionDef name:get_dataset arg:self arguments arg Raise" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, checkpoint, proto_id):\n self._checkpoint = checkpoint\n self._proto_id = proto_id\n self.skip_restore = False\n self.callback = checkpoint_adapter.ReshardCallback()", + "docstring": "Specify an object within a checkpoint. Args: checkpoint: A _CheckpointRestoreCoordinator object. proto_id: The index of this object in TrackableObjectGraph.nodes.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:checkpoint arg:proto_id arguments arg arg arg Assign Assign Assign Assign Call" + }, + { + "library": "pandas", + "name": "metadata", + "source_code": "@property\ndef metadata(self):\n return getattr(self.queryables.get(self.lhs), 'metadata', None)", + "docstring": "the metadata of my field", + "type": "method", + "file_path": "pandas\\pandas\\core\\computation\\pytables.py", + "ast_data": "FunctionDef name:metadata arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_union", + "source_code": "def _union(parent: EdgeOrNode, child: EdgeOrNode, shared_with_map: dict[EdgeOrNode, EdgeOrNode]) -> None:\n root_parent = _find_root_edge_or_node(parent, shared_with_map)\n root_child = _find_root_edge_or_node(child, shared_with_map)\n shared_with_map[root_child] = root_parent", + "docstring": "Merge the subtree for with , the order is important here", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\prepare.py", + "ast_data": "FunctionDef name:_union arg:parent arg:child arg:shared_with_map arguments arg arg arg Assign Call Assign Call Assign" + }, + { + "library": "matplotlib", + "name": "expanded", + "source_code": "def expanded(self, sw, sh):\n width = self.width\n height = self.height\n deltaw = (sw * width - width) / 2.0\n deltah = (sh * height - height) / 2.0\n a = np.array([[-deltaw, -deltah], [deltaw, deltah]])\n return Bbox(self._points + a)", + "docstring": "Construct a by expanding this one around its center by the factors *sw* and *sh*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:expanded arg:self arg:sw arg:sh arguments arg arg arg Assign Assign 
Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "_get_default_qt_c", + "source_code": "def _get_default_qt_c(device: Device, dtype: Dtype) -> Tensor:\n return torch.tensor([[17, 18, 24, 47, 99, 99, 99, 99], [18, 21, 26, 66, 99, 99, 99, 99], [24, 26, 56, 99, 99, 99, 99, 99], [47, 66, 99, 99, 99, 99, 99, 99], [99, 99, 99, 99, 99, 99, 99, 99], [99, 99, 99, 99, 99, 99, 99, 99], [99, 99, 99, 99, 99, 99, 99, 99], [99, 99, 99, 99, 99, 99, 99, 99]], device=device, dtype=dtype)", + "docstring": "Generate default Quantization table of C channels.", + "type": "function", + "file_path": "kornia\\kornia\\enhance\\jpeg.py", + "ast_data": "FunctionDef name:_get_default_qt_c arg:device arg:dtype arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_torch_export_args", + "source_code": "def _get_torch_export_args(args: tuple[Any, ...], kwargs: dict[str, Any] | None) -> tuple[tuple[Any, ...], dict[str, Any] | None]:\n if not kwargs and args and isinstance(args[-1], dict):\n kwargs = args[-1]\n args = args[:-1]\n return (args, kwargs)", + "docstring": "Obtain the arguments for torch.onnx.export from the model and the input arguments.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_compat.py", + "ast_data": "FunctionDef name:_get_torch_export_args arg:args arg:kwargs arguments arg arg If BoolOp Call Assign Assign Return return:yes" + }, + { + "library": "django", + "name": "set_options", + "source_code": "def set_options(self, **options):\n self.interactive = options['interactive']\n self.verbosity = options['verbosity']\n self.symlink = options['link']\n self.clear = options['clear']\n self.dry_run = options['dry_run']\n ignore_patterns = options['ignore_patterns']\n if options['use_default_ignore_patterns']:\n ignore_patterns += apps.get_app_config('staticfiles').ignore_patterns\n self.ignore_patterns = list({os.path.normpath(p) for p in ignore_patterns})\n self.post_process = options['post_process']", + "docstring": "Set instance variables based on an options dict", + "type": "method", + "file_path": "django\\django\\contrib\\staticfiles\\management\\commands\\collectstatic.py", + "ast_data": "FunctionDef name:set_options arg:self arguments arg arg Assign Assign Assign Assign Assign Assign If Call Assign Call Call Assign" + }, + { + "library": "kornia", + "name": "normal_transform_pixel3d", + "source_code": "def normal_transform_pixel3d(depth: int, height: int, width: int, eps: float=1e-14, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor:\n tr_mat = tensor([[1.0, 0.0, 0.0, -1.0], [0.0, 1.0, 0.0, -1.0], [0.0, 0.0, 1.0, -1.0], [0.0, 0.0, 0.0, 1.0]], device=device, dtype=dtype)\n width_denom: float = eps if width == 1 else width - 1.0\n height_denom: float = eps if height == 1 else height - 1.0\n depth_denom: float = eps if depth == 1 else depth - 1.0\n tr_mat[0, 0] = tr_mat[0, 0] * 2.0 / width_denom\n tr_mat[1, 1] = tr_mat[1, 1] * 2.0 / height_denom\n tr_mat[2, 2] = tr_mat[2, 2] * 2.0 / depth_denom\n return tr_mat.unsqueeze(0)", + "docstring": "Compute the normalization matrix from image size in pixels to [-1, 1]. Args: depth: image depth. height: image height. width: image width. eps: epsilon to prevent divide-by-zero errors device: device to place the result on. dtype: dtype of the result. 
Returns: normalized transform with shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:normal_transform_pixel3d arg:depth arg:height arg:width arg:eps arg:device arg:dtype arguments arg arg arg arg arg arg Assign Call Compare Compare Compare Assign Assign Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "traverse_state_dict_v_2_3", + "source_code": "def traverse_state_dict_v_2_3(state_dict: STATE_DICT_TYPE, visitor: Callable[[OBJ_PATH, STATE_DICT_ITEM], None], keep_traversing: Callable[[STATE_DICT_ITEM], bool]=_keep_visiting_tensors) -> None:\n\n def _is_terminal(value: STATE_DICT_ITEM) -> bool:\n values: Collection[STATE_DICT_ITEM]\n if isinstance(value, Mapping):\n values = value.values()\n elif isinstance(value, list):\n values = value\n else:\n return True\n for entry in values:\n if isinstance(entry, (Mapping, list)) and (not _is_terminal(entry)):\n return False\n if keep_traversing is not None and keep_traversing(entry):\n return False\n return True\n\n def _traverse_obj(path: OBJ_PATH, value: STATE_DICT_ITEM) -> None:\n if _is_terminal(value):\n visitor(path, value)\n elif isinstance(value, Mapping):\n for k, v in value.items():\n _traverse_obj(path + (str(k),), v)\n elif isinstance(value, list):\n for i, v in enumerate(value):\n _traverse_obj(path + (i,), v)\n for key, value in state_dict.items():\n _traverse_obj((str(key),), value)", + "docstring": "Traversal is short-circuited when if finds a collection for which `` element are traversed. Visitor takes a path argument that is a tuple of the keys used to reach it.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\_traverse.py", + "ast_data": "FunctionDef name:traverse_state_dict_v_2_3 arg:state_dict arg:visitor arg:keep_traversing arguments arg arg arg FunctionDef name:_is_terminal arg:value arguments arg If Call Assign Call If Call Assign Return return:yes For If BoolOp Call Call Return return:yes If BoolOp Compare Call Return return:yes Return return:yes FunctionDef name:_traverse_obj arg:path arg:value arguments arg arg If Call Call If Call For Call Call Call If Call For Call Call For Call Call Call" + }, + { + "library": "pytorch", + "name": "_get_restore_location", + "source_code": "def _get_restore_location(device):\n map_location = torch.serialization._serialization_tls.map_location\n if map_location is None:\n return device\n elif isinstance(map_location, dict):\n return map_location.get(device, device)\n elif isinstance(map_location, (str, torch.device)):\n return map_location\n else:\n assert callable(map_location)\n raise RuntimeError('Callable map_location not supported with _rebuild_wrapper_subclass or _rebuild_device_tensor_from_numpy')", + "docstring": "Return the map_location location. 
Used for rebuild functions where the tensor device is distinct from the storage", + "type": "function", + "file_path": "pytorch\\torch\\_utils.py", + "ast_data": "FunctionDef name:_get_restore_location arg:device arguments arg Assign If Compare Return return:yes If Call Return return:yes Call If Call Return return:yes Call Raise Call" + }, + { + "library": "matplotlib", + "name": "report", + "source_code": "def report(issues, show_urls=False):\n lines = []\n if show_urls:\n for i in issues:\n role = 'ghpull' if 'merged_at' in i else 'ghissue'\n number = i['number']\n title = i['title'].replace('`', '``').strip()\n lines.append(f'* :{role}:`{number}`: {title}')\n else:\n for i in issues:\n number = i['number']\n title = i['title'].replace('`', '``').strip()\n lines.append('* {number}: {title}')\n return '\\n'.join(lines)", + "docstring": "Summary report about a list of issues, printing number and title.", + "type": "function", + "file_path": "matplotlib\\tools\\github_stats.py", + "ast_data": "FunctionDef name:report arg:issues arg:show_urls arguments arg arg Assign If For Assign Compare Assign Assign Call Call Call For Assign Assign Call Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_theta_offset", + "source_code": "def get_theta_offset(self):\n return self._theta_offset.get_matrix()[0, 2]", + "docstring": "Get the offset for the location of 0 in radians.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py", + "ast_data": "FunctionDef name:get_theta_offset arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "difference", + "source_code": "def difference(self, other):\n return self._geomgen(capi.geom_diff, other)", + "docstring": "Return a new geometry consisting of the region which is the difference of this geometry and the other.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:difference arg:self arg:other arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_tensor_overload_operator", + "source_code": "@classmethod\ndef _tensor_overload_operator(cls, operator):\n tensor_operator = getattr(tensor.Tensor, operator)\n\n def _operator(v, *args, **kwargs):\n return tensor_operator(v.value(), *args, **kwargs)\n setattr(cls, operator, _operator)", + "docstring": "Delegate an operator overload to .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py", + "ast_data": "FunctionDef name:_tensor_overload_operator arg:cls arg:operator arguments arg arg Assign Call FunctionDef name:_operator arg:v arguments arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_dense_var_to_tensor", + "source_code": "def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n if tpu_util.enclosing_tpu_context() is None:\n return self._values[0].read_value()\n else:\n return self._read_variable_op()", + "docstring": "Converts a variable to a tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", + "ast_data": "FunctionDef name:_dense_var_to_tensor arg:self arg:dtype arg:name arg:as_ref arguments arg arg arg arg If Compare Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_header_version", + "source_code": "def _get_header_version(path, name):\n for line in io.open(path, 'r', encoding='utf-8'):\n match = 
re.match('#define %s +(\\\\d+)' % name, line)\n if match:\n value = match.group(1)\n return int(value)\n raise ConfigError('#define \"{}\" is either\\n'.format(name) + ' not present in file {} OR\\n'.format(path) + ' its value is not an integer literal')", + "docstring": "Returns preprocessor defines in C header file.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_sycl_config.py", + "ast_data": "FunctionDef name:_get_header_version arg:path arg:name arguments arg arg For Call Assign Call If Assign Call Return return:yes Call Raise Call Call Call" + }, + { + "library": "matplotlib", + "name": "_read", + "source_code": "def _read(self):\n down_stack = [0]\n self._baseline_v = None\n while True:\n byte = self.file.read(1)[0]\n self._dtable[byte](self, byte)\n if self._missing_font:\n raise self._missing_font.to_exception()\n name = self._dtable[byte].__name__\n if name == '_push':\n down_stack.append(down_stack[-1])\n elif name == '_pop':\n down_stack.pop()\n elif name == '_down':\n down_stack[-1] += 1\n if self._baseline_v is None and len(getattr(self, 'stack', [])) == 3 and (down_stack[-1] >= 4):\n self._baseline_v = self.v\n if byte == 140:\n return True\n if self.state is _dvistate.post_post:\n self.close()\n return False", + "docstring": "Read one page from the file. Return True if successful, False if there were no more pages.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\dviread.py", + "ast_data": "FunctionDef name:_read arg:self arguments arg Assign Assign While Assign Call Call If Raise Call Assign If Compare Call If Compare Call If Compare If BoolOp Compare Compare Call Call Compare Assign If Compare Return return:yes If Compare Call Return return:yes" + }, + { + "library": "pygame", + "name": "make_surface", + "source_code": "def make_surface(array):\n if isinstance(array, numpy_ndarray) and array.dtype in numpy_floats:\n array = array.round(0).astype(numpy_uint32)\n return pix_make_surface(array)", + "docstring": "pygame.surfarray.make_surface (array): return Surface Copy an array to a new surface. Create a new Surface that best resembles the data and format on the array. 
The array can be 2D or 3D with any sized integer values.", + "type": "function", + "file_path": "pygame\\src_py\\surfarray.py", + "ast_data": "FunctionDef name:make_surface arg:array arguments arg If BoolOp Call Compare Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "adjust_debug_info_func_names", + "source_code": "def adjust_debug_info_func_names(self, debug_info):\n output_debug_info = graph_debug_info_pb2.GraphDebugInfo()\n output_debug_info.files[:] = debug_info.files\n for key in debug_info.traces:\n node, func = key.split('@')\n new_func = ''\n if func in self._concrete_functions:\n new_func = self._concrete_functions[func].function_def.signature.name\n output_debug_info.traces[node + '@' + new_func].CopyFrom(debug_info.traces[key])\n return output_debug_info", + "docstring": "Rewrite func names in the debug info by using the concrete func names.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py", + "ast_data": "FunctionDef name:adjust_debug_info_func_names arg:self arg:debug_info arguments arg arg Assign Call Assign For Assign Call Assign If Compare Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "polynomial_atoms", + "source_code": "def polynomial_atoms(self):\n found = set()\n\n def visit(expr, found=found):\n if expr.op is Op.FACTORS:\n for b in expr.data:\n b.traverse(visit)\n return expr\n if expr.op in (Op.TERMS, Op.COMPLEX):\n return\n if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):\n if expr.data[0] is ArithOp.POW:\n expr.data[1][0].traverse(visit)\n return expr\n return\n if expr.op in (Op.INTEGER, Op.REAL):\n return expr\n found.add(expr)\n if expr.op in (Op.INDEXING, Op.APPLY):\n return expr\n self.traverse(visit)\n return found", + "docstring": "Return a set of expressions used as atoms in polynomial self.", + "type": "method", + "file_path": "numpy\\numpy\\f2py\\symbolic.py", + "ast_data": "FunctionDef name:polynomial_atoms arg:self arguments arg Assign Call FunctionDef name:visit arg:expr arg:found arguments arg arg If Compare For Call Return return:yes If Compare Return return:no If BoolOp Compare Call If Compare Call Return return:yes Return return:no If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "ParallelStyle", + "source_code": "class ParallelStyle(ABC):\n src_data_rank: Optional[int] = 0\n\n @abstractmethod\n def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module:\n ...", + "docstring": "The parallel style contract defines how the module or submodule should be parallelized. 
It only defines the `` to use, this allows maximum flexibility for different kind of style implementations.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\style.py", + "ast_data": "ClassDef name:ParallelStyle FunctionDef name:_apply arg:self arg:module arg:device_mesh arguments arg arg arg" + }, + { + "library": "matplotlib", + "name": "HBoxDivider", + "source_code": "class HBoxDivider(SubplotDivider):\n\n def new_locator(self, nx, nx1=None):\n return super().new_locator(nx, 0, nx1, 0)\n\n def _locate(self, nx, ny, nx1, ny1, axes, renderer):\n nx += self._xrefindex\n nx1 += self._xrefindex\n fig_w, fig_h = self._fig.bbox.size / self._fig.dpi\n x, y, w, h = self.get_position_runtime(axes, renderer)\n summed_ws = self.get_horizontal_sizes(renderer)\n equal_hs = self.get_vertical_sizes(renderer)\n x0, y0, ox, hh = _locate(x, y, w, h, summed_ws, equal_hs, fig_w, fig_h, self.get_anchor())\n if nx1 is None:\n nx1 = -1\n x1, w1 = (x0 + ox[nx] / fig_w, (ox[nx1] - ox[nx]) / fig_w)\n y1, h1 = (y0, hh)\n return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)", + "docstring": "A for laying out axes horizontally, while ensuring that they have equal heights. Examples -------- .. plot:: gallery/axes_grid1/demo_axes_hbox_divider.py", + "type": "class", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py", + "ast_data": "ClassDef name:HBoxDivider FunctionDef name:new_locator arg:self arg:nx arg:nx1 arguments arg arg arg Return return:yes Call Call FunctionDef name:_locate arg:self arg:nx arg:ny arg:nx1 arg:ny1 arg:axes arg:renderer arguments arg arg arg arg arg arg arg Assign Assign Call Assign Call Assign Call Assign Call Call If Compare Assign Assign Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_AddAndReturnMatrix", + "source_code": "class _AddAndReturnMatrix(_Adder):\n\n def can_add(self, op1, op2):\n return isinstance(op1, linear_operator.LinearOperator) and isinstance(op2, linear_operator.LinearOperator)\n\n def _add(self, op1, op2, operator_name, hints):\n if _type(op1) in _EFFICIENT_ADD_TO_TENSOR:\n op_add_to_tensor, op_other = (op1, op2)\n else:\n op_add_to_tensor, op_other = (op2, op1)\n return linear_operator_full_matrix.LinearOperatorFullMatrix(matrix=op_add_to_tensor.add_to_tensor(op_other.to_dense()), is_non_singular=hints.is_non_singular, is_self_adjoint=hints.is_self_adjoint, is_positive_definite=hints.is_positive_definite, name=operator_name)", + "docstring": "\"Handles additions resulting in a .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_addition.py", + "ast_data": "ClassDef name:_AddAndReturnMatrix FunctionDef name:can_add arg:self arg:op1 arg:op2 arguments arg arg arg Return return:yes BoolOp Call Call FunctionDef name:_add arg:self arg:op1 arg:op2 arg:operator_name arg:hints arguments arg arg arg arg arg If Compare Call Assign Assign Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "_pad_along_last_axis", + "source_code": "def _pad_along_last_axis(X, m, *, xp):\n shape = X.shape[:-1] + (m,)\n Xl = xp.broadcast_to(X[..., :1], shape)\n Xr = xp.broadcast_to(X[..., -1:], shape)\n return xp.concat((Xl, X, Xr), axis=-1)", + "docstring": "Pad the data for computing the rolling window difference.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_entropy.py", + "ast_data": "FunctionDef name:_pad_along_last_axis arg:X arg:m arguments arg arg arg Assign Assign Call Assign Call Return return:yes Call" + }, + { + 
"library": "pytorch", + "name": "sub", + "source_code": "@register_decomposition(aten.sub)\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('a', 'b'), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef sub(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType], *, alpha: NumberType=1):\n a, b = _maybe_broadcast(a, b)\n if isinstance(a, TensorLike) and isinstance(b, TensorLike):\n torch._check(not utils.is_boolean_dtype(a.dtype) and (not utils.is_boolean_dtype(b.dtype)), lambda: 'Subtraction, the `-` operator, with two bool tensors is not supported. Use the `^` or `logical_xor()` operator instead.')\n if alpha != 1:\n dtype = a.dtype if isinstance(a, TensorLike) else b.dtype\n python_type = utils.dtype_to_type(dtype)\n if not utils.is_weakly_lesser_type(type(alpha), python_type):\n msg = f'alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!'\n raise ValueError(msg)\n if isinstance(b, torch.Tensor):\n b = prims.mul(b, alpha)\n else:\n b = b * alpha\n output = prims.sub(a, b)\n return handle_noncontiguous_outputs([a, b], output)", + "docstring": "Reference implementation of torch.sub", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\__init__.py", + "ast_data": "FunctionDef name:sub arg:a arg:b arguments arg arg arg Assign Call If BoolOp Call Call Call BoolOp Call Call arguments If Compare Assign Call Assign Call If Call Call Assign Call Raise Call If Call Assign Call Assign Assign Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_DefaultDistributionStrategy", + "source_code": "class _DefaultDistributionStrategy(Strategy):\n\n def __init__(self):\n if not _creating_default_strategy_singleton:\n raise RuntimeError('Should only create a single instance of _DefaultDistributionStrategy')\n super(_DefaultDistributionStrategy, self).__init__(_DefaultDistributionExtended(self))\n\n def __deepcopy__(self, memo):\n del memo\n raise RuntimeError('Should only create a single instance of _DefaultDistributionStrategy')", + "docstring": "Default if none is explicitly selected.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "ClassDef name:_DefaultDistributionStrategy FunctionDef name:__init__ arg:self arguments arg If Raise Call Call Call Call FunctionDef name:__deepcopy__ arg:self arg:memo arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "create_instruction", + "source_code": "def create_instruction(name, *, arg=None, argval=_NotProvided, target=None) -> Instruction:\n if inst_has_op_bits(name):\n if target is not None:\n raise RuntimeError('target cannot be specified for instruction')\n if arg is None:\n arg = 0\n else:\n cnt = (arg is not None) + (argval is not _NotProvided) + (target is not None)\n if cnt > 1:\n raise RuntimeError('only one of arg, argval, and target can be not None/_NotProvided')\n if arg is not None and (not isinstance(arg, int)):\n raise RuntimeError('instruction arg must be int or None')\n return Instruction(opcode=dis.opmap[name], opname=name, arg=arg, argval=argval, target=target)", + "docstring": "At most one of , , and can be not None/_NotProvided. This is to prevent ambiguity, e.g. does create_instruction(\"LOAD_CONST\", 5) mean load the constant at co_consts[5], or load the constant 5? If is not provided, it will be computed during assembly from or . 
Bits in the args of instructions LOAD_GLOBAL, LOAD_ATTR (3.12+), and LOAD_SUPER_ATTR modify the behavior of the instruction. In this case, we allow both and to be set. The value of here is expected to be the value of the op bits and the true value of will be computed during assembly. If is not set, the bits are assumed to be 0.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "FunctionDef name:create_instruction arg:name arguments arg arg arg arg If Call If Compare Raise Call If Compare Assign Assign Compare Compare Compare If Compare Raise Call If BoolOp Compare Call Raise Call Return return:yes Call" + }, + { + "library": "authlib", + "name": "serialize", + "source_code": "def serialize(self, header, payload, key):\n if isinstance(header, (list, tuple)):\n return self.serialize_json(header, payload, key)\n if 'protected' in header:\n return self.serialize_json(header, payload, key)\n return self.serialize_compact(header, payload, key)", + "docstring": "Generate a JWS Serialization. It will automatically generate a Compact or JSON Serialization depending on the given header. If a header is in a JSON header format, it will call :meth:, otherwise it will call :meth:. :param header: A dict/list of header :param payload: A string/dict of payload :param key: Private key used to generate signature :return: byte/dict", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7515\\jws.py", + "ast_data": "FunctionDef name:serialize arg:self arg:header arg:payload arg:key arguments arg arg arg arg If Call Return return:yes Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "rvs", + "source_code": "def rvs(self, m, n, size=None, random_state=None):\n M, m, n, _, _, _ = self._process_parameters(m, n)\n random_state = self._get_random_state(random_state)\n if size is not None and isinstance(size, int):\n size = (size,)\n if size is None:\n rvs = np.empty(m.shape, dtype=m.dtype)\n else:\n rvs = np.empty(size + (m.shape[-1],), dtype=m.dtype)\n rem = M\n for c in range(m.shape[-1] - 1):\n rem = rem - m[..., c]\n n0mask = n == 0\n rvs[..., c] = ~n0mask * random_state.hypergeometric(m[..., c], rem + n0mask, n + n0mask, size=size)\n n = n - rvs[..., c]\n rvs[..., m.shape[-1] - 1] = n\n return rvs", + "docstring": "Draw random samples from a multivariate hypergeometric distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw. Default is `multivariate_hypergeometric` sampler is not used as it doesn't support broadcasting.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:rvs arg:self arg:m arg:n arg:size arg:random_state arguments arg arg arg arg arg Assign Call Assign Call If BoolOp Compare Call Assign If Compare Assign Call Assign Call Assign For Call Assign Assign Compare Assign Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "dimension_at_index", + "source_code": "@tf_export('compat.dimension_at_index', v1=['dimension_at_index', 'compat.dimension_at_index'])\ndef dimension_at_index(shape, index) -> 'Dimension':\n assert isinstance(shape, TensorShape)\n if shape.rank is None:\n return Dimension(None)\n else:\n return shape.dims[index]", + "docstring": "Compatibility utility required to allow for both V1 and V2 behavior in TF. 
Until the release of TF 2.0, we need the legacy behavior of to coexist with the new behavior. This utility is a bridge between the two. If you want to retrieve the Dimension instance corresponding to a certain index in a TensorShape instance, use this utility, like this: Args: shape: A TensorShape instance. index: An integer index. Returns: A dimension object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", + "ast_data": "FunctionDef name:dimension_at_index arg:shape arg:index arguments arg arg Call If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "interned_modules", + "source_code": "def interned_modules(self) -> list[str]:\n return self._nodes_with_action_type(_ModuleProviderAction.INTERN)", + "docstring": "Return all modules that are currently interned. Returns: A list containing the names of modules which will be interned in this package.", + "type": "method", + "file_path": "pytorch\\torch\\package\\package_exporter.py", + "ast_data": "FunctionDef name:interned_modules arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "SquaredHinge", + "source_code": "class SquaredHinge(MeanMetricWrapper):\n\n def __init__(self, name='squared_hinge', dtype=None):\n super(SquaredHinge, self).__init__(squared_hinge, name, dtype=dtype)", + "docstring": "Computes the squared hinge metric between and . values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.SquaredHinge() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]]) >>> m.result().numpy() 1.86 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], ... sample_weight=[1, 0]) >>> m.result().numpy() 1.46 Usage with API:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "ClassDef name:SquaredHinge FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call" + }, + { + "library": "pytorch", + "name": "all_gather", + "source_code": "def all_gather(tensor, group=group.WORLD):\n return _AllGather.apply(group, tensor)", + "docstring": "Gathers tensors from the whole group in a list. Arguments: tensor (Tensor): Tensor to be broadcast from current process. group (ProcessGroup, optional): The process group to work on. Returns: tuple([Tensor]): Output of the collective.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\nn\\functional.py", + "ast_data": "FunctionDef name:all_gather arg:tensor arg:group arguments arg arg Return return:yes Call" + }, + { + "library": "numpy", + "name": "_round_if_needed", + "source_code": "def _round_if_needed(arr, dtype):\n if np.issubdtype(dtype, np.integer):\n arr.round(out=arr)", + "docstring": "Rounds arr inplace if destination dtype is integer. Parameters ---------- arr : ndarray Input array. 
dtype : dtype The dtype of the destination array.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_arraypad_impl.py", + "ast_data": "FunctionDef name:_round_if_needed arg:arr arg:dtype arguments arg arg If Call Call" + }, + { + "library": "tensorflow", + "name": "never_record_summaries", + "source_code": "def never_record_summaries():\n return record_if(False)", + "docstring": "Sets the should_record_summaries Tensor to always false.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:never_record_summaries arguments Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "convert_n_to_tensor", + "source_code": "def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None) -> list[Union[EagerTensor, SymbolicTensor]]:\n return internal_convert_n_to_tensor(values=values, dtype=dtype, name=name, preferred_dtype=preferred_dtype, as_ref=False)", + "docstring": "Converts to a list of objects. Args: values: A list of objects that can be consumed by . dtype: (Optional.) The required of the returned objects. name: (Optional.) A name prefix to used when a new is created, in which case element will be given the name . preferred_dtype: Optional element type for the returned tensors, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to is not possible, this argument has no effect. Returns: A list of and/or objects. Raises: TypeError: If no conversion function is registered for an element in . RuntimeError: If a registered conversion function returns an invalid value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:convert_n_to_tensor arg:values arg:dtype arg:name arg:preferred_dtype arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "softplus", + "source_code": "def softplus(x, **kwargs):\n return np.logaddexp(0, x, **kwargs)", + "docstring": "Compute the softplus function element-wise. The softplus function is defined as: `ufunc docs `. Examples -------- >>> from scipy import special >>> special.softplus(0) 0.6931471805599453 >>> special.softplus([-1, 0, 1]) array([0.31326169, 0.69314718, 1.31326169])", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:softplus arg:x arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "html_safe", + "source_code": "def html_safe(klass):\n if '__html__' in klass.__dict__:\n raise ValueError(\"can't apply @html_safe to %s because it defines __html__().\" % klass.__name__)\n if '__str__' not in klass.__dict__:\n raise ValueError(\"can't apply @html_safe to %s because it doesn't define __str__().\" % klass.__name__)\n klass_str = klass.__str__\n klass.__str__ = lambda self: mark_safe(klass_str(self))\n klass.__html__ = lambda self: str(self)\n return klass", + "docstring": "A decorator that defines the __html__ method. 
This helps non-Django templates to detect classes whose __str__ methods return SafeString.", + "type": "function", + "file_path": "django\\django\\utils\\html.py", + "ast_data": "FunctionDef name:html_safe arg:klass arguments arg If Compare Raise Call If Compare Raise Call Assign Assign arguments arg Call Call Assign arguments arg Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_cached_call", + "source_code": "def _cached_call(cache, estimator, response_method, *args, **kwargs):\n if cache is not None and response_method in cache:\n return cache[response_method]\n result, _ = _get_response_values(estimator, *args, response_method=response_method, **kwargs)\n if cache is not None:\n cache[response_method] = result\n return result", + "docstring": "Call estimator with method and args and kwargs.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py", + "ast_data": "FunctionDef name:_cached_call arg:cache arg:estimator arg:response_method arguments arg arg arg arg arg If BoolOp Compare Compare Return return:yes Assign Call If Compare Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "ValueRangesSLoc", + "source_code": "@dataclass\nclass ValueRangesSLoc:\n lower: SLoc\n upper: SLoc", + "docstring": "Locations of the guards that triggered lower and upper bound.", + "type": "class", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "ClassDef name:ValueRangesSLoc" + }, + { + "library": "authlib", + "name": "validate_application_type", + "source_code": "def validate_application_type(self):\n self.setdefault('application_type', 'web')\n if self.get('application_type') not in ('web', 'native'):\n raise InvalidClaimError('application_type')\n self._validate_claim_value('application_type')", + "docstring": "Kind of the application. The default, if omitted, is web. The defined values are native or web. Web Clients using the OAuth Implicit Grant Type MUST only register URLs using the https scheme as redirect_uris; they MUST NOT use localhost as the hostname. Native Clients MUST only register redirect_uris using custom URI schemes or loopback URLs using the http scheme; loopback URLs use localhost or the IP loopback literals 127.0.0.1 or [::1] as the hostname. Authorization Servers MAY place additional constraints on Native Clients. Authorization Servers MAY reject Redirection URI values using the http scheme, other than the loopback case for Native Clients. The Authorization Server MUST verify that all the registered redirect_uris conform to these constraints. 
This prevents sharing a Client ID across different types of Clients.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\registration\\claims.py", + "ast_data": "FunctionDef name:validate_application_type arg:self arguments arg Call If Compare Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "_split_autocast", + "source_code": "def _split_autocast(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:\n enter_autocast_node_stack: list[torch.fx.Node] = []\n first_node_after_outer_most_exit: bool = False\n\n def node_call_back(node: torch.fx.Node) -> bool:\n nonlocal enter_autocast_node_stack, first_node_after_outer_most_exit\n increment_id = False\n if first_node_after_outer_most_exit or (len(enter_autocast_node_stack) == 0 and _is_enter_autocast_node(node)):\n assert len(enter_autocast_node_stack) == 0\n first_node_after_outer_most_exit = False\n increment_id = True\n if _is_enter_autocast_node(node):\n enter_autocast_node_stack.append(node)\n elif _is_exit_autocast_node(node):\n assert len(enter_autocast_node_stack) > 0\n last_enter_autocast_node = enter_autocast_node_stack.pop()\n assert node.args[0] == last_enter_autocast_node\n if len(enter_autocast_node_stack) == 0:\n first_node_after_outer_most_exit = True\n return increment_id\n return sequential_split(gm, node_call_back)", + "docstring": "split_autocast creates a new graph module that splits the input graph module into multiple submodules based on the and nodes. It doesn't mutate the input graph module. Nodes between the **outer-most** and are splitted into a submodule. Nested autocast regions are not splitted. and nodes are in the submodule as well. Below is an example of splitting. A, B, C, D, E are blocks of non-autocast nodes in the original graph module. Nodes marked with the same number are grouped into the same submodule. A # 0 enter_autocast # 1 B # 1 exit_autocast # 1 C # 2 enter_autocast # 3 D # 3 exit_autocast # 3 E # 4", + "type": "function", + "file_path": "pytorch\\torch\\_export\\passes\\replace_autocast_with_hop_pass.py", + "ast_data": "FunctionDef name:_split_autocast arg:gm arguments arg FunctionDef name:node_call_back arg:node arguments arg Assign If BoolOp BoolOp Compare Call Call Compare Call Assign Assign If Call Call If Call Compare Call Assign Call Compare If Compare Call Assign Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_count_nonzero", + "source_code": "def _count_nonzero(input_tensor, dtype=dtypes.int64):\n with ops.name_scope('count_nonzero', values=[input_tensor]):\n zero = array_ops.zeros([], dtype=input_tensor.dtype)\n nonzero_count = math_ops.reduce_sum(math_ops.cast(math_ops.not_equal(input_tensor, zero), dtype=dtype), name='nonzero_count')\n return nonzero_count", + "docstring": "Same as math_ops.count_nonzero. The reduction is done in dtype, which can be faster for 32-bit dtypes. 
Args: input_tensor: numeric tensor dtype: reduction dtype Returns: number of nonzero values with type dtype", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl.py", + "ast_data": "FunctionDef name:_count_nonzero arg:input_tensor arg:dtype arguments arg arg With Call Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "write_release_task", + "source_code": "def write_release_task(options, filename='README'):\n idirs = options.installers.installersdir\n notes = paver.path.path(RELEASE_NOTES)\n rst_readme = paver.path.path(filename + '.rst')\n md_readme = paver.path.path(filename + '.md')\n with open(rst_readme, 'w') as freadme:\n with open(notes) as fnotes:\n freadme.write(fnotes.read())\n freadme.writelines(textwrap.dedent('\\n Checksums\\n =========\\n\\n MD5\\n ---\\n ::\\n\\n '))\n freadme.writelines([f' {c}\\n' for c in compute_md5(idirs)])\n freadme.writelines(textwrap.dedent('\\n SHA256\\n ------\\n ::\\n\\n '))\n freadme.writelines([f' {c}\\n' for c in compute_sha256(idirs)])\n sh(f'pandoc -s -o {md_readme} {rst_readme}')\n if hasattr(options, 'gpg_key'):\n cmd = f'gpg --clearsign --armor --default_key {options.gpg_key}'\n else:\n cmd = 'gpg --clearsign --armor'\n sh(cmd + f' --output {rst_readme}.gpg {rst_readme}')\n sh(cmd + f' --output {md_readme}.gpg {md_readme}')", + "docstring": "Append hashes of release files to release notes. This appends file hashes to the release notes and creates four README files of the result in various formats: - README.rst - README.rst.gpg - README.md - README.md.gpg The md file are created using so that the links are properly updated. The gpg files are kept separate, so that the unsigned files may be edited before signing if needed. Parameters ---------- options : Set by `` decorator. filename : str Filename of the modified notes. The file is written in the release directory.", + "type": "function", + "file_path": "numpy\\pavement.py", + "ast_data": "FunctionDef name:write_release_task arg:options arg:filename arguments arg arg Assign Assign Call Assign Call Assign Call With Call With Call Call Call Call Call Call Call Call Call Call Call Call If Call Assign Assign Call Call" + }, + { + "library": "seaborn", + "name": "savefig", + "source_code": "def savefig(self, *args, **kwargs):\n kwargs = kwargs.copy()\n kwargs.setdefault('bbox_inches', 'tight')\n self.figure.savefig(*args, **kwargs)", + "docstring": "Save an image of the plot. This wraps :meth:, using bbox_inches=\"tight\" by default. 
Parameters are passed through to the matplotlib function.", + "type": "method", + "file_path": "seaborn\\seaborn\\axisgrid.py", + "ast_data": "FunctionDef name:savefig arg:self arguments arg arg arg Assign Call Call Call" + }, + { + "library": "scipy", + "name": "jarque_bera", + "source_code": "@xp_capabilities(jax_jit=False, allow_dask_compute=True)\n@_axis_nan_policy_factory(SignificanceResult, default_axis=None)\ndef jarque_bera(x, *, axis=None):\n xp = array_namespace(x)\n x, axis = _chk_asarray(x, axis, xp=xp)\n mu = _xp_mean(x, axis=axis, keepdims=True)\n diffx = x - mu\n s = skew(diffx, axis=axis, _no_deco=True)\n k = kurtosis(diffx, axis=axis, _no_deco=True)\n n = xp.asarray(_length_nonmasked(x, axis), dtype=mu.dtype)\n statistic = n / 6 * (s ** 2 + k ** 2 / 4)\n chi2 = _SimpleChi2(xp.asarray(2.0, dtype=mu.dtype))\n pvalue = _get_pvalue(statistic, chi2, alternative='greater', symmetric=False, xp=xp)\n statistic = statistic[()] if statistic.ndim == 0 else statistic\n pvalue = pvalue[()] if pvalue.ndim == 0 else pvalue\n return SignificanceResult(statistic, pvalue)", + "docstring": "Perform the Jarque-Bera goodness of fit test on sample data. The Jarque-Bera test tests whether the sample data has the skewness and kurtosis matching a normal distribution. Note that this test only works for a large enough number of data samples (>2000) as the test statistic asymptotically has a Chi-squared distribution with 2 degrees of freedom. Parameters ---------- x : array_like Observations of a random variable. axis : int or None, default: 0 If an int, the axis of the input along which to compute the statistic. The statistic of each axis-slice (e.g. row) of the input will appear in a corresponding element of the output. If `hypothesis_jarque_berahypothesis_jarque_bera`.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_stats_py.py", + "ast_data": "FunctionDef name:jarque_bera arg:x arguments arg arg Assign Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call Call Assign Assign Call Call Assign Call Assign Compare Assign Compare Return return:yes Call Call Call" + }, + { + "library": "sphinx", + "name": "hlist", + "source_code": "class hlist(nodes.Element):\n pass", + "docstring": "Node for \"horizontal lists\", i.e. lists that should be compressed to take up less vertical space.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:hlist" + }, + { + "library": "scipy", + "name": "_as_float_array", + "source_code": "def _as_float_array(x, check_finite=False):\n x = np.ascontiguousarray(x)\n dtyp = _get_dtype(x.dtype)\n x = x.astype(dtyp, copy=False)\n if check_finite and (not np.isfinite(x).all()):\n raise ValueError('Array must not contain infs or nans.')\n return x", + "docstring": "Convert the input into a C contiguous float array. NB: Upcasts half- and single-precision floats to double precision.", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_bsplines.py", + "ast_data": "FunctionDef name:_as_float_array arg:x arg:check_finite arguments arg arg Assign Call Assign Call Assign Call If BoolOp Call Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "RegexLocation", + "source_code": "@dataclasses.dataclass\nclass RegexLocation:\n path: str\n line_number: int\n line_contents: str\n matched_text: str", + "docstring": "Path and line where a prohibited regex was found. Attributes: path: Path of the file which has the prohibited regex. line_number: The number of the offending line. 
line_contents: The text of the offending line. matched_text: The exact string matched by the regex.", + "type": "class", + "file_path": "tensorflow\\third_party\\xla\\build_tools\\lint\\check_contents.py", + "ast_data": "ClassDef name:RegexLocation" + }, + { + "library": "django", + "name": "commit", + "source_code": "@async_unsafe\ndef commit(self):\n self.validate_thread_sharing()\n self.validate_no_atomic_block()\n self._commit()\n self.errors_occurred = False\n self.run_commit_hooks_on_set_autocommit_on = True", + "docstring": "Commit a transaction and reset the dirty flag.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\base.py", + "ast_data": "FunctionDef name:commit arg:self arguments arg Call Call Call Assign Assign" + }, + { + "library": "scipy", + "name": "moderatec", + "source_code": "def moderatec(c):\n np.nan_to_num(c, copy=False, nan=CONSTRMAX)\n c = np.clip(c, -CONSTRMAX, CONSTRMAX)\n return c", + "docstring": "This function moderates the constraint value, the constraint demanding this value to be NONNEGATIVE. It replaces any value below -CONSTRMAX by -CONSTRMAX, and any NaN or value above CONSTRMAX by CONSTRMAX.", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\evaluate.py", + "ast_data": "FunctionDef name:moderatec arg:c arguments arg Call Assign Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "readlines", + "source_code": "def readlines(self, sizehint=None):\n return self.fp.readlines(sizehint)", + "docstring": "Read some byte lines from the connection.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpreqbody.py", + "ast_data": "FunctionDef name:readlines arg:self arg:sizehint arguments arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "slice_indexer", + "source_code": "def slice_indexer(self, start: Hashable | None=None, end: Hashable | None=None, step: int | None=None) -> slice:\n start_slice, end_slice = self.slice_locs(start, end, step=step)\n if not is_scalar(start_slice):\n raise AssertionError('Start slice bound is non-scalar')\n if not is_scalar(end_slice):\n raise AssertionError('End slice bound is non-scalar')\n return slice(start_slice, end_slice, step)", + "docstring": "Compute the slice indexer for input labels and step. Index needs to be ordered and unique. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. step : int, default None If None, defaults to 1. Returns ------- slice A slice object. Raises ------ KeyError : If key does not exist, or key is not unique and index is not ordered. See Also -------- Index.slice_locs : Computes slice locations for input labels. Index.get_slice_bound : Retrieves slice bound that corresponds to given label. Notes ----- This function assumes that the data is sorted, so use at your own peril. Examples -------- This is a method on all index types. 
For example you can do: >>> idx = pd.Index(list(\"abcd\")) >>> idx.slice_indexer(start=\"b\", end=\"c\") slice(1, 3, None) >>> idx = pd.MultiIndex.from_arrays([list(\"abcd\"), list(\"efgh\")]) >>> idx.slice_indexer(start=\"b\", end=(\"c\", \"g\")) slice(1, 3, None)", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:slice_indexer arg:self arg:start arg:end arg:step arguments arg arg arg arg Assign Call If Call Raise Call If Call Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "filter", + "source_code": "@staticmethod\ndef filter(node_schedule: list[NodeScheduleEntry]) -> Iterable[SchedulerNode]:\n disabled = False\n for node in node_schedule:\n if node in (EnableReduction, DisableReduction):\n disabled = node is DisableReduction\n elif disabled:\n pass\n else:\n yield node", + "docstring": "Get the nodes from node_schedule skipping those in a DisableReduction block.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\simd_kernel_features.py", + "ast_data": "FunctionDef name:filter arg:node_schedule arguments arg Assign For If Compare Assign Compare If" + }, + { + "library": "matplotlib", + "name": "get_fontname", + "source_code": "def get_fontname(self):\n return self._header[b'FontName']", + "docstring": "Return the font name, e.g., 'Times-Roman'.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_afm.py", + "ast_data": "FunctionDef name:get_fontname arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_safe_initial_value_from_op", + "source_code": "def _safe_initial_value_from_op(name, op, op_cache):\n op_type = op.node_def.op\n if op_type in ('IsVariableInitialized', 'VarIsInitializedOp', 'ReadVariableOp', 'If'):\n return op\n if op_type in ('Variable', 'VariableV2', 'VarHandleOp'):\n initialized_value = _find_initialized_value_for_variable(op)\n return op if initialized_value is None else initialized_value.op\n modified = False\n new_op_inputs = []\n for op_input in op.inputs:\n new_op_input = _safe_initial_value_from_tensor(name, op_input, op_cache)\n new_op_inputs.append(new_op_input)\n modified = modified or new_op_input != op_input\n if modified:\n new_op_type = op_type\n if new_op_type == 'RefSwitch':\n new_op_type = 'Switch'\n new_op_name = op.node_def.name + '_' + name\n new_op_name = new_op_name.replace(':', '_')\n return op.graph.create_op(new_op_type, new_op_inputs, op._output_types, name=new_op_name, attrs=op.node_def.attr)\n return op", + "docstring": "Replace dependencies on variables with their initialized values. Args: name: Variable name. op: An . The operation to replace. op_cache: A dict mapping operation names to s. Used to memoize the results so as to avoid creating redundant operations. Returns: An compatible with . Any inputs that lead to variable values will be replaced with a corresponding graph that uses the variable's initialized values. This is done on a best-effort basis. 
If no modifications need to be made then will be returned unchanged.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:_safe_initial_value_from_op arg:name arg:op arg:op_cache arguments arg arg arg Assign If Compare Return return:yes If Compare Assign Call Return return:yes Compare Assign Assign For Assign Call Call Assign BoolOp Compare If Assign If Compare Assign Assign Assign Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "tanh", + "source_code": "@dispatch.add_dispatch_support\ndef tanh(x):\n return nn.tanh(x)", + "docstring": "Hyperbolic tangent activation function. For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.tanh(a) >>> b.numpy() array([-0.9950547, -0.7615942, 0., 0.7615942, 0.9950547], dtype=float32) Args: x: Input tensor. Returns: Tensor of same shape and dtype of input , with tanh activation: .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py", + "ast_data": "FunctionDef name:tanh arg:x arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "csrf_exempt", + "source_code": "def csrf_exempt(view_func):\n if iscoroutinefunction(view_func):\n\n async def _view_wrapper(request, *args, **kwargs):\n return await view_func(request, *args, **kwargs)\n else:\n\n def _view_wrapper(request, *args, **kwargs):\n return view_func(request, *args, **kwargs)\n _view_wrapper.csrf_exempt = True\n return wraps(view_func)(_view_wrapper)", + "docstring": "Mark a view function as being exempt from the CSRF view protection.", + "type": "function", + "file_path": "django\\django\\views\\decorators\\csrf.py", + "ast_data": "FunctionDef name:csrf_exempt arg:view_func arguments arg If Call AsyncFunctionDef name:_view_wrapper arg:request arguments arg arg arg Return return:yes Call FunctionDef name:_view_wrapper arg:request arguments arg arg arg Return return:yes Call Assign Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_compute_cost_div_m", + "source_code": "def _compute_cost_div_m(m, p, norm_info):\n return int(np.ceil(norm_info.alpha(p) / _theta[m]))", + "docstring": "A helper function for computing bounds. This is equation (3.10). It measures cost in terms of the number of required matrix products. Parameters ---------- m : int A valid key of _theta. p : int A matrix power. norm_info : LazyOperatorNormInfo Information about 1-norms of related operators. 
Returns ------- cost_div_m : int Required number of matrix products divided by m.", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py", + "ast_data": "FunctionDef name:_compute_cost_div_m arg:m arg:p arg:norm_info arguments arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "state", + "source_code": "@property\ndef state(self):\n return self._state_var", + "docstring": "The internal state of the RNG.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", + "ast_data": "FunctionDef name:state arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "variables", + "source_code": "def variables(self):\n return self._weights", + "docstring": "Returns variables of this Optimizer based on the order created.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", + "ast_data": "FunctionDef name:variables arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_resource_apply_sparse", + "source_code": "def _resource_apply_sparse(self, grad, handle, indices, apply_state):\n raise NotImplementedError('Must be implemented in subclasses.')", + "docstring": "Add ops to apply sparse gradients to the variable . Similar to , the argument to this method has been de-duplicated. Optimizers which deal correctly with non-unique indices may instead override to avoid this overhead. Args: grad: a representing the gradient for the affected indices. handle: a of dtype which points to the variable to be updated. indices: a of integral type representing the indices for which the gradient is nonzero. Indices are unique. apply_state: A dict which is used across multiple apply calls. Returns: An which updates the value of the variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", + "ast_data": "FunctionDef name:_resource_apply_sparse arg:self arg:grad arg:handle arg:indices arg:apply_state arguments arg arg arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "moments", + "source_code": "@tf_export(v1=['nn.moments'])\n@dispatch.add_dispatch_support\ndef moments(x, axes, shift=None, name=None, keep_dims=None, keepdims=None):\n keep_dims = deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims)\n if keep_dims is None:\n keep_dims = False\n with ops.name_scope(name, 'moments', [x, axes]):\n y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x\n mean = math_ops.reduce_mean(y, axes, keepdims=True, name='mean')\n variance = math_ops.reduce_mean(math_ops.squared_difference(y, array_ops.stop_gradient(mean)), axes, keepdims=True, name='variance')\n if not keep_dims:\n mean = array_ops.squeeze(mean, axes)\n variance = array_ops.squeeze(variance, axes)\n if x.dtype == dtypes.float16:\n return (math_ops.cast(mean, dtypes.float16), math_ops.cast(variance, dtypes.float16))\n else:\n return (mean, variance)", + "docstring": "Calculate the mean and variance of . The mean and variance are calculated by aggregating the contents of across . If is 1-D and this is just the mean and variance of a vector. Note: shift is currently not used; the true mean is computed and used. When using these moments for batch normalization (see ): * for so-called \"global normalization\", used with convolutional filters with shape , pass . * for simple batch normalization pass (batch only). Args: x: A . 
axes: Array of ints. Axes along which to compute mean and variance. shift: Not used in the current implementation name: Name used to scope the operations that compute the moments. keep_dims: produce moments with the same dimensionality as the input. keepdims: Alias to keep_dims. Returns: Two objects: and .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl.py", + "ast_data": "FunctionDef name:moments arg:x arg:axes arg:shift arg:name arg:keep_dims arg:keepdims arguments arg arg arg arg arg arg Assign Call If Compare Assign With Call Assign Compare Call Assign Call Assign Call Call Call If Assign Call Assign Call If Compare Return return:yes Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "clone", + "source_code": "def clone(self) -> Image:\n return Image(self.data.clone(), self.pixel_format, self.layout)", + "docstring": "Return a copy of the image.", + "type": "method", + "file_path": "kornia\\kornia\\image\\image.py", + "ast_data": "FunctionDef name:clone arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_handle_deferred_dependencies", + "source_code": "def _handle_deferred_dependencies(self, name, trackable):\n self._maybe_initialize_trackable()\n trackable._maybe_initialize_trackable()\n deferred_dependencies_list = self._deferred_dependencies.pop(name, ())\n for checkpoint_position in sorted(deferred_dependencies_list, key=lambda restore: restore.checkpoint.restore_uid, reverse=True):\n checkpoint_position.restore(trackable)\n for name_based_restore in sorted(self._self_name_based_restores, key=lambda checkpoint: checkpoint.restore_uid, reverse=True):\n trackable._name_based_attribute_restore(name_based_restore)", + "docstring": "Pop and load any deferred checkpoint restores into . This method does not add a new dependency on , but it does check if any outstanding/deferred dependencies have been queued waiting for this dependency to be added (matched based on ). If so, and its dependencies are restored. The restorations are considered fulfilled and so are deleted. is more appropriate for adding a normal/unconditional dependency, and includes handling for deferred restorations. This method allows objects such as to use the same restoration logic while managing conditional dependencies themselves, by overriding and to change the object's dependencies based on the context it is saved/restored in (a single optimizer instance can have state associated with multiple graphs). Args: name: The name of the dependency within this object (), used to match with values saved in a checkpoint. 
trackable: The Trackable object to restore (inheriting from ).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py", + "ast_data": "FunctionDef name:_handle_deferred_dependencies arg:self arg:name arg:trackable arguments arg arg arg Call Call Assign Call For Call arguments arg Call For Call arguments arg Call" + }, + { + "library": "tensorflow", + "name": "_RNNCellWrapperV2", + "source_code": "class _RNNCellWrapperV2(recurrent.AbstractRNNCell):\n\n def __init__(self, cell, *args, **kwargs):\n super(_RNNCellWrapperV2, self).__init__(*args, **kwargs)\n self.cell = cell\n cell_call_spec = tf_inspect.getfullargspec(cell.call)\n self._expects_training_arg = 'training' in cell_call_spec.args or cell_call_spec.varkw is not None\n\n def call(self, inputs, state, **kwargs):\n return self._call_wrapped_cell(inputs, state, cell_call_fn=self.cell.call, **kwargs)\n\n def build(self, inputs_shape):\n self.cell.build(inputs_shape)\n self.built = True\n\n def get_config(self):\n config = {'cell': {'class_name': self.cell.__class__.__name__, 'config': self.cell.get_config()}}\n base_config = super(_RNNCellWrapperV2, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n config = config.copy()\n from tensorflow.python.keras.layers.serialization import deserialize as deserialize_layer\n cell = deserialize_layer(config.pop('cell'), custom_objects=custom_objects)\n return cls(cell, **config)", + "docstring": "Base class for cells wrappers V2 compatibility. This class along with allows to define wrappers that are compatible with V1 and V2, and defines helper methods for this purpose.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\rnn_cell_wrapper_v2.py", + "ast_data": "ClassDef name:_RNNCellWrapperV2 FunctionDef name:__init__ arg:self arg:cell arguments arg arg arg arg Call Call Assign Assign Call Assign BoolOp Compare Compare FunctionDef name:call arg:self arg:inputs arg:state arguments arg arg arg arg Return return:yes Call FunctionDef name:build arg:self arg:inputs_shape arguments arg arg Call Assign FunctionDef name:get_config arg:self arguments arg Assign Call Assign Call Call Return return:yes Call Call Call Call Call FunctionDef name:from_config arg:cls arg:config arg:custom_objects arguments arg arg arg Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_is_onnx_constant", + "source_code": "def _is_onnx_constant(value: _C.Value):\n return value.node().kind() == 'onnx::Constant'", + "docstring": "Whether a Value is an ONNX constant.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py", + "ast_data": "FunctionDef name:_is_onnx_constant arg:value arguments arg Return return:yes Compare Call Call" + }, + { + "library": "tensorflow", + "name": "TrainingLoop", + "source_code": "class TrainingLoop(object):\n\n def fit(self, model, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, **kwargs):\n raise NotImplementedError()\n\n def evaluate(self, model, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, **kwargs):\n raise NotImplementedError()\n\n def predict(self, model, x, batch_size=None, verbose=0, steps=None, callbacks=None, 
**kwargs):\n raise NotImplementedError()", + "docstring": "TrainingLoop is a wrapper class around the training logic. This class is trying to encapsulate the different logic of fit/eval/predict with regard to different data input and model condition. Note that TrainingLoop is stateless, which means it doesn't contain any internal field and can be reused with different model and inputs.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "ClassDef name:TrainingLoop FunctionDef name:fit arg:self arg:model arg:x arg:y arg:batch_size arg:epochs arg:verbose arg:callbacks arg:validation_split arg:validation_data arg:shuffle arg:class_weight arg:sample_weight arg:initial_epoch arg:steps_per_epoch arg:validation_steps arg:validation_freq arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Raise Call FunctionDef name:evaluate arg:self arg:model arg:x arg:y arg:batch_size arg:verbose arg:sample_weight arg:steps arg:callbacks arguments arg arg arg arg arg arg arg arg arg arg Raise Call FunctionDef name:predict arg:self arg:model arg:x arg:batch_size arg:verbose arg:steps arg:callbacks arguments arg arg arg arg arg arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "create_state", + "source_code": "def create_state(self, state_manager):\n pass", + "docstring": "Uses the to create state for the FeatureColumn. Args: state_manager: A to create / access resources such as lookup tables and variables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2_types.py", + "ast_data": "FunctionDef name:create_state arg:self arg:state_manager arguments arg arg" + }, + { + "library": "tensorflow", + "name": "convolution_kernel", + "source_code": "def convolution_kernel(self, name='convolution_kernel'):\n with self._name_scope(name):\n h = self._ifft(_to_complex(self.spectrum))\n return math_ops.cast(h, self.dtype)", + "docstring": "Convolution kernel corresponding to . The dimensional DFT of this kernel is the frequency domain spectrum of this operator. Args: name: A name to give this . Returns: with .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_circulant.py", + "ast_data": "FunctionDef name:convolution_kernel arg:self arg:name arguments arg arg With Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "concentration", + "source_code": "@property\ndef concentration(self):\n return self._concentration", + "docstring": "Concentration parameter; expected prior counts for that coordinate.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet_multinomial.py", + "ast_data": "FunctionDef name:concentration arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "axis_aligned_extrema", + "source_code": "def axis_aligned_extrema(self):\n n = self.degree\n if n <= 1:\n return (np.array([]), np.array([]))\n Cj = self.polynomial_coefficients\n dCj = np.arange(1, n + 1)[:, None] * Cj[1:]\n dims = []\n roots = []\n for i, pi in enumerate(dCj.T):\n r = np.roots(pi[::-1])\n roots.append(r)\n dims.append(np.full_like(r, i))\n roots = np.concatenate(roots)\n dims = np.concatenate(dims)\n in_range = np.isreal(roots) & (roots >= 0) & (roots <= 1)\n return (dims[in_range], np.real(roots)[in_range])", + "docstring": "Return the dimension and location of the curve's interior extrema. 
The extrema are the points along the curve where one of its partial derivatives is zero. Returns ------- dims : array of int Index :math: of the partial derivative which is zero at each interior extrema. dzeros : array of float Of same size as dims. The :math: such that :math:", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\bezier.py", + "ast_data": "FunctionDef name:axis_aligned_extrema arg:self arguments arg Assign If Compare Return return:yes Call Call Assign Assign Call Assign Assign For Call Assign Call Call Call Call Assign Call Assign Call Assign Call Compare Compare Return return:yes Call" + }, + { + "library": "pytorch", + "name": "from_tuple", + "source_code": "@staticmethod\ndef from_tuple(chunk_dims: tuple[int, ...]):\n args_chunk_spec = map_aggregate(chunk_dims, lambda dim: TensorChunkSpec(dim))\n return args_chunk_spec", + "docstring": "A helper for creating a tuple of from a tuple of chunk dimensions (int's). Example: >>> # xdoctest: +SKIP >>> # There are three positional arguments to the model, and >>> # we are chunking them along dimension 0, 0 and 1, respectively >>> args_chunk_spec = TensorChunkSpec.from_tuple((0, 0, 1))", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\microbatch.py", + "ast_data": "FunctionDef name:from_tuple arg:chunk_dims arguments arg Assign Call arguments arg Call Return return:yes" + }, + { + "library": "kornia", + "name": "spatial_kernel_embedding", + "source_code": "def spatial_kernel_embedding(kernel_type: str, grids: Dict[str, Tensor]) -> Tensor:\n factors = {'phi': 1.0, 'rho': pi / sqrt2, 'x': pi / 2, 'y': pi / 2}\n if kernel_type == 'cart':\n coeffs_ = 'xy'\n params_ = ['x', 'y']\n elif kernel_type == 'polar':\n coeffs_ = 'rhophi'\n params_ = ['phi', 'rho']\n keys = list(grids.keys())\n patch_size = grids[keys[0]].shape[-1]\n grids_normed = {k: v * factors[k] for k, v in grids.items()}\n grids_normed = {k: v.unsqueeze(0).unsqueeze(0).float() for k, v in grids_normed.items()}\n vm_a = VonMisesKernel(patch_size=patch_size, coeffs=COEFFS[coeffs_])\n vm_b = VonMisesKernel(patch_size=patch_size, coeffs=COEFFS[coeffs_])\n emb_a = vm_a(grids_normed[params_[0]]).squeeze()\n emb_b = vm_b(grids_normed[params_[1]]).squeeze()\n kron_order = get_kron_order(vm_a.d, vm_b.d)\n spatial_kernel = emb_a.index_select(0, kron_order[:, 0]) * emb_b.index_select(0, kron_order[:, 1])\n return spatial_kernel", + "docstring": "Compute embeddings for cartesian and polar parametrizations.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\mkd.py", + "ast_data": "FunctionDef name:spatial_kernel_embedding arg:kernel_type arg:grids arguments arg arg Assign If Compare Assign Assign If Compare Assign Assign Assign Call Call Assign Assign Call Assign Call Call Call Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pygame", + "name": "quit", + "source_code": "def quit():\n global _wq, _use_workers\n _wq.stop()\n _wq = None\n _use_workers = False", + "docstring": "cleans up everything.", + "type": "function", + "file_path": "pygame\\src_py\\threads\\__init__.py", + "ast_data": "FunctionDef name:quit arguments Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "pop", + "source_code": "def pop(self):\n self.stack.pop()", + "docstring": "Pop the stack.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:pop arg:self arguments arg Call" + }, + { + 
"library": "tensorflow", + "name": "variables_path", + "source_code": "@property\ndef variables_path(self):\n return self._variables_path", + "docstring": "Path to variable checkpoint files.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py", + "ast_data": "FunctionDef name:variables_path arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "g_topograph", + "source_code": "def g_topograph(self, x_min, X_min):\n x_min = np.array([x_min])\n self.Y = spatial.distance.cdist(x_min, X_min, 'euclidean')\n self.Z = np.argsort(self.Y, axis=-1)\n self.Ss = X_min[self.Z][0]\n self.minimizer_pool = self.minimizer_pool[self.Z]\n self.minimizer_pool = self.minimizer_pool[0]\n return self.Ss", + "docstring": "Returns the topographical vector stemming from the specified value `` with True boolean values indicating positive entries and False values indicating negative entries.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_shgo.py", + "ast_data": "FunctionDef name:g_topograph arg:self arg:x_min arg:X_min arguments arg arg arg Assign Call Assign Call Assign Call Assign Assign Assign Return return:yes" + }, + { + "library": "seaborn", + "name": "get_mapping", + "source_code": "def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n boolean_scale = isinstance(scale, Boolean)\n order = getattr(scale, 'order', [True, False] if boolean_scale else None)\n levels = categorical_order(data, order)\n values = self._get_values(scale, levels)\n if boolean_scale:\n values = values[::-1]\n\n def mapping(x):\n ixs = np.asarray(np.nan_to_num(x), np.intp)\n return [values[ix] if np.isfinite(x_i) else self.null_value for x_i, ix in zip(x, ixs)]\n return mapping", + "docstring": "Define mapping as lookup into list of object values.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\properties.py", + "ast_data": "FunctionDef name:get_mapping arg:self arg:scale arg:data arguments arg arg arg Assign Call Assign Call Assign Call Assign Call If Assign FunctionDef name:mapping arg:x arguments arg Assign Call Call Return return:yes Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_is_variable_op", + "source_code": "def _is_variable_op(op):\n return op in _VARIABLE_OPS", + "docstring": "Returns true if 'op' refers to a Variable node.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_util_impl.py", + "ast_data": "FunctionDef name:_is_variable_op arg:op arguments arg Return return:yes Compare" + }, + { + "library": "kornia", + "name": "invert", + "source_code": "def invert(probability: float, _: int) -> OperationBase:\n return Invert(probability)", + "docstring": "Return invert op.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\auto\\autoaugment\\ops.py", + "ast_data": "FunctionDef name:invert arg:probability arg:_ arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "Problem07", + "source_code": "class Problem07(Benchmark):\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n self._bounds = [(2.7, 7.5)]\n self.global_optimum = 5.19978\n self.fglob = -1.6013\n\n def fun(self, x, *args):\n self.nfev += 1\n x = x[0]\n return sin(x) + sin(10.0 / 3.0 * x) + log(x) - 0.84 * x + 3", + "docstring": "Univariate Problem07 objective function. This class defines the Univariate Problem07 global optimization problem. This is a multimodal minimization problem defined as follows: .. 
math:: f_{\\text{Problem07}}(x) = \\sin(x) + \\sin \\left(\\frac{10}{3}x \\right) + \\log(x) - 0.84x + 3 Bound constraints: :math: .. figure:: figures/Problem07.png :alt: Univariate Problem07 function :align: center **Univariate Problem07 function** *Global optimum*: :math: for :math:", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py", + "ast_data": "ClassDef name:Problem07 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call Call" + }, + { + "library": "kornia", + "name": "_quantize", + "source_code": "def _quantize(input: Tensor, jpeg_quality: Tensor, quantization_table: Tensor) -> Tensor:\n quantization_table_scaled: Tensor = quantization_table[:, None] * _jpeg_quality_to_scale(jpeg_quality)[:, None, None, None]\n quantization_table = differentiable_polynomial_floor(differentiable_clipping((quantization_table_scaled + 50.0) / 100.0, 1, 255))\n output: Tensor = input / quantization_table\n output = differentiable_polynomial_rounding(output)\n return output", + "docstring": "Perform quantization. Args: input (Tensor): Input tensor of the shape :math:. jpeg_quality (Tensor): Compression strength to be applied, shape is :math:. quantization_table (Tensor): Quantization table of the shape :math: or :math:. Returns: output (Tensor): Quantized output tensor of the shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\enhance\\jpeg.py", + "ast_data": "FunctionDef name:_quantize arg:input arg:jpeg_quality arg:quantization_table arguments arg arg arg Call Assign Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, profile_datum_list, time_unit=cli_shared.TIME_UNIT_US):\n self._profile_datum_list = profile_datum_list\n self.formatted_start_time = [datum.start_time for datum in profile_datum_list]\n self.formatted_op_time = [cli_shared.time_to_readable_str(datum.op_time, force_time_unit=time_unit) for datum in profile_datum_list]\n self.formatted_exec_time = [cli_shared.time_to_readable_str(datum.node_exec_stats.all_end_rel_micros, force_time_unit=time_unit) for datum in profile_datum_list]\n self._column_names = ['Node', 'Op Type', 'Start Time (us)', 'Op Time (%s)' % time_unit, 'Exec Time (%s)' % time_unit, 'Filename:Lineno(function)']\n self._column_sort_ids = [SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE, SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME, SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_LINE]", + "docstring": "Constructor. Args: profile_datum_list: List of objects. 
time_unit: must be in cli_shared.TIME_UNITS.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\profile_analyzer_cli.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:profile_datum_list arg:time_unit arguments arg arg arg Assign Assign Assign Call Assign Call Assign Assign" + }, + { + "library": "numpy", + "name": "_can_target", + "source_code": "def _can_target(cmd, arch):\n newcmd = cmd[:]\n fid, filename = tempfile.mkstemp(suffix='.f')\n os.close(fid)\n try:\n d = os.path.dirname(filename)\n output = os.path.splitext(filename)[0] + '.o'\n try:\n newcmd.extend(['-arch', arch, '-c', filename])\n p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)\n p.communicate()\n return p.returncode == 0\n finally:\n if os.path.exists(output):\n os.remove(output)\n finally:\n os.remove(filename)", + "docstring": "Return true if the architecture supports the -arch flag", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\fcompiler\\gnu.py", + "ast_data": "FunctionDef name:_can_target arg:cmd arg:arch arguments arg arg Assign Assign Call Call Try Assign Call Assign Call Try Call Assign Call Call Return return:yes Compare If Call Call Call" + }, + { + "library": "numpy", + "name": "searchsorted", + "source_code": "@array_function_dispatch(_searchsorted_dispatcher)\ndef searchsorted(a, v, side='left', sorter=None):\n return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)", + "docstring": "Find indices where elements should be inserted to maintain order. Find the indices into a sorted array such that, if the corresponding elements in were inserted before the indices, the order of would be preserved. Assuming that is sorted: ====== ============================ returned index satisfies ====== ============================ left `sorteraa` itself: >>> a = np.array([40, 10, 20, 30]) >>> sorter = np.argsort(a) >>> sorter array([1, 2, 3, 0]) # Indices that would sort the array 'a' >>> result = np.searchsorted(a, 25, sorter=sorter) >>> result 2 >>> a[sorter[result]] 30 # The element at index 2 of the sorted array is 30.", + "type": "function", + "file_path": "numpy\\numpy\\_core\\fromnumeric.py", + "ast_data": "FunctionDef name:searchsorted arg:a arg:v arg:side arg:sorter arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "draw_path_collection", + "source_code": "def draw_path_collection(self, gc, master_transform, paths, all_transforms, offsets, offset_trans, facecolors, edgecolors, linewidths, linestyles, antialiaseds, urls, offset_position, *, hatchcolors=None):\n path_ids = self._iter_collection_raw_paths(master_transform, paths, all_transforms)\n if hatchcolors is None:\n hatchcolors = []\n for xo, yo, path_id, gc0, rgbFace in self._iter_collection(gc, list(path_ids), offsets, offset_trans, facecolors, edgecolors, linewidths, linestyles, antialiaseds, urls, offset_position, hatchcolors=hatchcolors):\n path, transform = path_id\n if xo != 0 or yo != 0:\n transform = transform.frozen()\n transform.translate(xo, yo)\n self.draw_path(gc0, path, transform, rgbFace)", + "docstring": "Draw a collection of *paths*. Each path is first transformed by the corresponding entry in *all_transforms* (a list of (3, 3) matrices) and then by *master_transform*. They are then translated by the corresponding entry in *offsets*, which has been first transformed by *offset_trans*. *facecolors*, *edgecolors*, *linewidths*, *linestyles*, *antialiased* and *hatchcolors* are lists that set the corresponding properties. .. 
versionadded:: 3.11 Allow *hatchcolors* to be specified. *offset_position* is unused now, but the argument is kept for backwards compatibility. The base (fallback) implementation makes multiple calls to . Backends may want to override this in order to render each set of path data only once, and then reference that path multiple times with the different offsets, colors, styles etc. The generator methods and are provided to help with (and standardize) the implementation across backends. It is highly recommended to use those generators, so that changes to the behavior of can be made globally.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:draw_path_collection arg:self arg:gc arg:master_transform arg:paths arg:all_transforms arg:offsets arg:offset_trans arg:facecolors arg:edgecolors arg:linewidths arg:linestyles arg:antialiaseds arg:urls arg:offset_position arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call If Compare Assign For Call Call Assign If BoolOp Compare Compare Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "register_load_state_dict_pre_hook", + "source_code": "def register_load_state_dict_pre_hook(self, hook):\n return self._register_load_state_dict_pre_hook(hook, with_module=True)", + "docstring": "Register a pre-hook to be run before module's :meth: is called. It should have the following signature:: hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) -> None # noqa: B950 Arguments: hook (Callable): Callable hook that will be invoked before loading the state dict.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:register_load_state_dict_pre_hook arg:self arg:hook arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "set_shape", + "source_code": "def set_shape(self, shape):\n self._ref().set_shape(shape)\n self.value().set_shape(shape)", + "docstring": "Overrides the shape for this variable. Args: shape: the representing the overridden shape.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", + "ast_data": "FunctionDef name:set_shape arg:self arg:shape arguments arg arg Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "_get_neighbor_feat_idx", + "source_code": "def _get_neighbor_feat_idx(self, n_features, feat_idx, abs_corr_mat):\n if self.n_nearest_features is not None and self.n_nearest_features < n_features:\n p = abs_corr_mat[:, feat_idx]\n neighbor_feat_idx = self.random_state_.choice(np.arange(n_features), self.n_nearest_features, replace=False, p=p)\n else:\n inds_left = np.arange(feat_idx)\n inds_right = np.arange(feat_idx + 1, n_features)\n neighbor_feat_idx = np.concatenate((inds_left, inds_right))\n return neighbor_feat_idx", + "docstring": "Get a list of other features to predict . If is less than or equal to the total number of features, then use a probability proportional to the absolute correlation between and each other feature to randomly choose a subsample of the other features (without replacement). Parameters ---------- n_features : int Number of features in . feat_idx : int Index of the feature currently being imputed. abs_corr_mat : ndarray, shape (n_features, n_features) Absolute correlation matrix of . The diagonal has been zeroed out and each feature has been normalized to sum to 1. Can be None. 
Returns ------- neighbor_feat_idx : array-like The features to use to impute .", + "type": "method", + "file_path": "scikit-learn\\sklearn\\impute\\_iterative.py", + "ast_data": "FunctionDef name:_get_neighbor_feat_idx arg:self arg:n_features arg:feat_idx arg:abs_corr_mat arguments arg arg arg arg If BoolOp Compare Compare Assign Assign Call Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "sphinx", + "name": "merge_domaindata", + "source_code": "def merge_domaindata(self, docnames: Set[str], otherdata: dict[str, Any]) -> None:\n msg = f'merge_domaindata must be implemented in {self.__class__} to be able to do parallel builds!'\n raise NotImplementedError(msg)", + "docstring": "Merge in data regarding *docnames* from a different domaindata inventory (coming from a subprocess in parallel builds).", + "type": "method", + "file_path": "sphinx\\sphinx\\domains\\__init__.py", + "ast_data": "FunctionDef name:merge_domaindata arg:self arg:docnames arg:otherdata arguments arg arg arg Assign Raise Call" + }, + { + "library": "tensorflow", + "name": "from_config", + "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None):\n with generic_utils.SharedObjectLoadingScope():\n input_tensors, output_tensors, created_layers = reconstruct_from_config(config, custom_objects)\n model = cls(inputs=input_tensors, outputs=output_tensors, name=config.get('name'))\n connect_ancillary_layers(model, created_layers)\n return model", + "docstring": "Instantiates a Model from its config (output of ). Args: config: Model config dictionary. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A model instance. Raises: ValueError: In case of improperly formatted config dict.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py", + "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arguments arg arg arg With Call Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "enable_xla_sharding_for_resource_variables", + "source_code": "def enable_xla_sharding_for_resource_variables():\n global _XLA_SHARDING_FOR_RESOURCE_VARIABLES\n _XLA_SHARDING_FOR_RESOURCE_VARIABLES = True\n if context_safe() is not None:\n context_safe().xla_sharding_for_resource_variables = True", + "docstring": "Enables support for annotating TF2 ResourceVariables with XLA sharding. This allows placing XLA sharding annotations on the TF2 ResourceVariable python object and inserts an XlaShardingOp with the annotation whenever a ReadVariableOp is created.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:enable_xla_sharding_for_resource_variables arguments Assign If Compare Call Assign Call" + }, + { + "library": "scipy", + "name": "upcast", + "source_code": "def upcast(*args):\n t = _upcast_memo.get(hash(args))\n if t is not None:\n return t\n upcast = np.result_type(*args)\n for t in supported_dtypes:\n if np.can_cast(upcast, t):\n _upcast_memo[hash(args)] = t\n return t\n raise TypeError(f'no supported conversion for types: {args!r}')", + "docstring": "Returns the nearest supported sparse dtype for the combination of one or more types. 
upcast(t0, t1, ..., tn) -> T where T is a supported dtype Examples -------- >>> from scipy.sparse._sputils import upcast >>> upcast('int32') >>> upcast('bool') >>> upcast('int32','float32') >>> upcast('bool',complex,float)", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_sputils.py", + "ast_data": "FunctionDef name:upcast arguments arg Assign Call Call If Compare Return return:yes Assign Call For If Call Assign Call Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "wait_all", + "source_code": "def wait_all(futures: list[Future]) -> list:\n return [fut.wait() for fut in torch._C._collect_all(cast(list[torch._C.Future], futures)).wait()]", + "docstring": "Waits for all provided futures to be complete, and returns the list of completed values. If any of the futures encounters an error, the method will exit early and report the error not waiting for other futures to complete. Args: futures (list): a list of :class: object. Returns: A list of the completed :class: results. This method will throw an error if `~torch.futures.Future` throws.", + "type": "function", + "file_path": "pytorch\\torch\\futures\\__init__.py", + "ast_data": "FunctionDef name:wait_all arg:futures arguments arg Return return:yes Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "set_image_mode", + "source_code": "def set_image_mode(self, mode):\n _api.check_in_list(['full', 'diff'], mode=mode)\n if self._current_image_mode != mode:\n self._current_image_mode = mode\n self.handle_send_image_mode(None)", + "docstring": "Set the image mode for any subsequent images which will be sent to the clients. The modes may currently be either 'full' or 'diff'. Note: diff images may not contain transparency, therefore upon draw this mode may be changed if the resulting image has any transparent component.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_webagg_core.py", + "ast_data": "FunctionDef name:set_image_mode arg:self arg:mode arguments arg arg Call If Compare Assign Call" + }, + { + "library": "matplotlib", + "name": "shrink", + "source_code": "def shrink(self) -> None:\n self.size += 1", + "docstring": "Shrinks one level smaller. There are only three levels of sizes, after which things will no longer get smaller.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py", + "ast_data": "FunctionDef name:shrink arg:self arguments arg" + }, + { + "library": "scipy", + "name": "_angular_acceleration_nonlinear_term", + "source_code": "def _angular_acceleration_nonlinear_term(rotvecs, rotvecs_dot):\n norm = np.linalg.norm(rotvecs, axis=1)\n dp = np.sum(rotvecs * rotvecs_dot, axis=1)\n cp = np.cross(rotvecs, rotvecs_dot)\n ccp = np.cross(rotvecs, cp)\n dccp = np.cross(rotvecs_dot, cp)\n k1 = np.empty_like(norm)\n k2 = np.empty_like(norm)\n k3 = np.empty_like(norm)\n mask = norm > 0.0001\n nm = norm[mask]\n k1[mask] = (-nm * np.sin(nm) - 2 * (np.cos(nm) - 1)) / nm ** 4\n k2[mask] = (-2 * nm + 3 * np.sin(nm) - nm * np.cos(nm)) / nm ** 5\n k3[mask] = (nm - np.sin(nm)) / nm ** 3\n mask = ~mask\n nm = norm[mask]\n k1[mask] = 1 / 12 - nm ** 2 / 180\n k2[mask] = -1 / 60 + nm ** 2 / 12604\n k3[mask] = 1 / 6 - nm ** 2 / 120\n dp = dp[:, None]\n k1 = k1[:, None]\n k2 = k2[:, None]\n k3 = k3[:, None]\n return dp * (k1 * cp + k2 * ccp) + k3 * dccp", + "docstring": "Compute the non-linear term in angular acceleration. The angular acceleration contains a quadratic term with respect to the derivative of the rotation vector. 
This function computes that. Parameters ---------- rotvecs : ndarray, shape (n, 3) Set of rotation vectors. rotvecs_dot : ndarray, shape (n, 3) Set of rotation vector derivatives. Returns ------- ndarray, shape (n, 3)", + "type": "function", + "file_path": "scipy\\scipy\\spatial\\transform\\_rotation_spline.py", + "ast_data": "FunctionDef name:_angular_acceleration_nonlinear_term arg:rotvecs arg:rotvecs_dot arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Compare Assign Assign Call Call Assign Call Call Assign Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Return return:yes" + }, + { + "library": "django", + "name": "UpdateError", + "source_code": "class UpdateError(Exception):\n pass", + "docstring": "Occurs if Django tries to update a session that was deleted.", + "type": "class", + "file_path": "django\\django\\contrib\\sessions\\backends\\base.py", + "ast_data": "ClassDef name:UpdateError" + }, + { + "library": "tensorflow", + "name": "cluster_spec", + "source_code": "def cluster_spec(self):\n return self._cluster_spec", + "docstring": "Returns the ClusterSpec passed into the constructor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py", + "ast_data": "FunctionDef name:cluster_spec arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "_wrap_transform_fast_result", + "source_code": "@final\ndef _wrap_transform_fast_result(self, result: NDFrameT) -> NDFrameT:\n obj = self._obj_with_exclusions\n ids = self._grouper.ids\n result = result.reindex(self._grouper.result_index, axis=0)\n if self.obj.ndim == 1:\n out = algorithms.take_nd(result._values, ids)\n output = obj._constructor(out, index=obj.index, name=obj.name)\n else:\n new_ax = result.index.take(ids)\n output = result._reindex_with_indexers({0: (new_ax, ids)}, allow_dups=True)\n output = output.set_axis(obj.index, axis=0)\n return output", + "docstring": "Fast transform path for aggregations.", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\groupby.py", + "ast_data": "FunctionDef name:_wrap_transform_fast_result arg:self arg:result arguments arg arg Assign Assign Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "ClassifierTags", + "source_code": "@dataclass(slots=True)\nclass ClassifierTags:\n poor_score: bool = False\n multi_class: bool = True\n multi_label: bool = False", + "docstring": "Tags for the classifier. 
Parameters ---------- poor_score : bool, default=False Whether the estimator fails to provide a \"reasonable\" test-set score, which currently for classification is an accuracy of 0.83 on `multi-classmulti-label` in the glossary.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\utils\\_tags.py", + "ast_data": "ClassDef name:ClassifierTags Call" + }, + { + "library": "tensorflow", + "name": "init_restore_or_wait_for_variables", + "source_code": "def init_restore_or_wait_for_variables():\n backend._initialize_variables(backend._get_session())", + "docstring": "Initialize or restore variables or wait for variables to be initialized.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py", + "ast_data": "FunctionDef name:init_restore_or_wait_for_variables arguments Call Call" + }, + { + "library": "scipy", + "name": "__call__", + "source_code": "def __call__(self, x):\n x, x_shape = self._prepare_x(x)\n y = self._evaluate(x)\n return self._finish_y(y, x_shape)", + "docstring": "Evaluate the interpolant Parameters ---------- x : array_like Point or points at which to evaluate the interpolant. Returns ------- y : array_like Interpolated values. Shape is determined by replacing the interpolation axis in the original array with the shape of . Notes ----- Input values must be convertible to values like or .", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_polyint.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "cryptography", + "name": "tobytes", + "source_code": "def tobytes(self) -> bytes:\n buf = memoryview(bytearray(self.size()))\n self.render(buf)\n return buf.tobytes()", + "docstring": "Return as bytes", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py", + "ast_data": "FunctionDef name:tobytes arg:self arguments arg Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "__from_arrow__", + "source_code": "def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> ArrowExtensionArray:\n array_class = self.construct_array_type()\n arr = array.cast(self.pyarrow_dtype, safe=True)\n return array_class(arr)", + "docstring": "Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", + "ast_data": "FunctionDef name:__from_arrow__ arg:self arg:array arguments arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "current_stream", + "source_code": "def current_stream(device: Optional[_device_t]=None) -> Stream:\n _lazy_init()\n streamdata = torch._C._xpu_getCurrentStream(_get_device_index(device, optional=True))\n return Stream(stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2])", + "docstring": "Return the currently selected :class: for a given device. Args: device (torch.device or int, optional): selected device. 
Returns the currently selected :class: for the current device, given by :func:, if :attr: is `` (default).", + "type": "function", + "file_path": "pytorch\\torch\\xpu\\__init__.py", + "ast_data": "FunctionDef name:current_stream arg:device arguments arg Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "relu6", + "source_code": "@tf_export('nn.relu6')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef relu6(features, name=None):\n with ops.name_scope(name, 'Relu6', [features]) as name:\n features = ops.convert_to_tensor(features, name='features')\n return gen_nn_ops.relu6(features, name=name)", + "docstring": "Computes Rectified Linear 6: . In comparison with , relu6 activation functions have shown to empirically perform better under low-precision conditions (e.g. fixed point inference) by encouraging the model to learn sparse features earlier. Source: [Convolutional Deep Belief Networks on CIFAR-10: Krizhevsky et al., 2010]( For example: >>> x = tf.constant([-3.0, -1.0, 0.0, 6.0, 10.0], dtype=tf.float32) >>> y = tf.nn.relu6(x) >>> y.numpy() array([0., 0., 0., 6., 6.], dtype=float32) Args: features: A with type , , , , , , or . name: A name for the operation (optional). Returns: A with the same type as . References: Convolutional Deep Belief Networks on CIFAR-10: Krizhevsky et al., 2010 ([pdf](", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", + "ast_data": "FunctionDef name:relu6 arg:features arg:name arguments arg arg With Call Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "inverse_flattening", + "source_code": "@property\ndef inverse_flattening(self):\n return capi.invflattening(self.ptr, byref(c_int()))", + "docstring": "Return the Inverse Flattening for this Spatial Reference.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py", + "ast_data": "FunctionDef name:inverse_flattening arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "_rewrite_cache_key_for_mega_cache", + "source_code": "@staticmethod\ndef _rewrite_cache_key_for_mega_cache(original_key: str) -> str:\n if not original_key.startswith('mast:'):\n return original_key\n if (new_key := get_cache_key()) is not None:\n return new_key\n return original_key", + "docstring": "The PGO cache artifact key for a MAST job contains the job name and the version. 
When we want to use the cache artifact on a different MAST job, we need to update the key to use the new MAST job's name and version.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\pgo.py", + "ast_data": "FunctionDef name:_rewrite_cache_key_for_mega_cache arg:original_key arguments arg If Call Return return:yes If Compare Call Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "_array_str_implementation", + "source_code": "def _array_str_implementation(a, max_line_width=None, precision=None, suppress_small=None, array2string=array2string):\n if format_options.get()['legacy'] <= 113 and a.shape == () and (not a.dtype.names):\n return str(a.item())\n if a.shape == ():\n return _guarded_repr_or_str(np.ndarray.__getitem__(a, ()))\n return array2string(a, max_line_width, precision, suppress_small, ' ', '')", + "docstring": "Internal version of array_str() that allows overriding array2string.", + "type": "function", + "file_path": "numpy\\numpy\\_core\\arrayprint.py", + "ast_data": "FunctionDef name:_array_str_implementation arg:a arg:max_line_width arg:precision arg:suppress_small arg:array2string arguments arg arg arg arg arg If BoolOp Compare Call Compare Return return:yes Call Call If Compare Return return:yes Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "take_data", + "source_code": "def take_data(self):\n return self.values", + "docstring": "return the values", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:take_data arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "contains_saved_model", + "source_code": "@tf_export('saved_model.contains_saved_model', v1=[])\ndef contains_saved_model(export_dir):\n if isinstance(export_dir, os.PathLike):\n export_dir = os.fspath(export_dir)\n return maybe_saved_model_directory(export_dir)", + "docstring": "Checks whether the provided export directory could contain a SavedModel. Note that the method does not load any data by itself. If the method returns , the export directory definitely does not contain a SavedModel. If the method returns , the export directory may contain a SavedModel but provides no guarantee that it can be loaded. Args: export_dir: Absolute path to possible export location. For example, '/my/foo/model'. 
Returns: True if the export directory contains SavedModel files, False otherwise.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py", + "ast_data": "FunctionDef name:contains_saved_model arg:export_dir arguments arg If Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "is_feedable", + "source_code": "def is_feedable(self, tensor) -> bool:\n return tensor not in self._unfeedable_tensors", + "docstring": "Returns if and only if is feedable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:is_feedable arg:self arg:tensor arguments arg arg Return return:yes Compare" + }, + { + "library": "pandas", + "name": "_calc_max_rows_fitted", + "source_code": "def _calc_max_rows_fitted(self) -> int | None:\n max_rows: int | None\n if self._is_in_terminal():\n _, height = get_terminal_size()\n if self.max_rows == 0:\n return height - self._get_number_of_auxiliary_rows()\n if self._is_screen_short(height):\n max_rows = height\n else:\n max_rows = self.max_rows\n else:\n max_rows = self.max_rows\n return self._adjust_max_rows(max_rows)", + "docstring": "Number of rows with data fitting the screen.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\format.py", + "ast_data": "FunctionDef name:_calc_max_rows_fitted arg:self arguments arg If Call Assign Call If Compare Return return:yes Call If Call Assign Assign Assign Return return:yes Call" + }, + { + "library": "kornia", + "name": "width", + "source_code": "@property\ndef width(self) -> int:\n return int(self.layout.image_size.width)", + "docstring": "Return the image width (rows).", + "type": "method", + "file_path": "kornia\\kornia\\image\\image.py", + "ast_data": "FunctionDef name:width arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_get_window_indexer", + "source_code": "def _get_window_indexer(self) -> GroupbyIndexer:\n rolling_indexer: type[BaseIndexer]\n indexer_kwargs: dict[str, Any] | None = None\n index_array = self._index_array\n if isinstance(self.window, BaseIndexer):\n rolling_indexer = type(self.window)\n indexer_kwargs = self.window.__dict__.copy()\n assert isinstance(indexer_kwargs, dict)\n indexer_kwargs.pop('index_array', None)\n window = self.window\n elif self._win_freq_i8 is not None:\n rolling_indexer = VariableWindowIndexer\n window = self._win_freq_i8\n else:\n rolling_indexer = FixedWindowIndexer\n window = self.window\n window_indexer = GroupbyIndexer(index_array=index_array, window_size=window, groupby_indices=self._grouper.indices, window_indexer=rolling_indexer, indexer_kwargs=indexer_kwargs)\n return window_indexer", + "docstring": "Return an indexer class that will compute the window start and end bounds Returns ------- GroupbyIndexer", + "type": "method", + "file_path": "pandas\\pandas\\core\\window\\rolling.py", + "ast_data": "FunctionDef name:_get_window_indexer arg:self arguments arg Assign If Call Assign Call Assign Call Call Call Assign If Compare Assign Assign Assign Assign Assign Call Return return:yes" + }, + { + "library": "django", + "name": "geographic", + "source_code": "@property\ndef geographic(self):\n return bool(capi.isgeographic(self.ptr))", + "docstring": "Return True if this SpatialReference is geographic (root node is GEOGCS).", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py", + "ast_data": "FunctionDef name:geographic arg:self arguments arg Return 
return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "aliased_name_rest", + "source_code": "def aliased_name_rest(self, s, target):\n if target in self._NOT_LINKABLE:\n return f'``{s}``'\n aliases = ''.join((f' or :meth:`{a} <{target}>`' for a in sorted(self.aliasd.get(s, []))))\n return f':meth:`{s} <{target}>`{aliases}'", + "docstring": "Return 'PROPNAME or alias' if *s* has an alias, else return 'PROPNAME', formatted for reST. For example, for the line markerfacecolor property, which has an alias, return 'markerfacecolor or mfc' and for the transform property, which does not, return 'transform'.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:aliased_name_rest arg:self arg:s arg:target arguments arg arg arg If Compare Return return:yes Assign Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_get_scale_docs", + "source_code": "def _get_scale_docs():\n docs = []\n for name, scale_class in _scale_mapping.items():\n docstring = inspect.getdoc(scale_class.__init__) or ''\n docs.extend([f' {name!r}', '', textwrap.indent(docstring, ' ' * 8), ''])\n return '\\n'.join(docs)", + "docstring": "Helper function for generating docstrings related to scales.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\scale.py", + "ast_data": "FunctionDef name:_get_scale_docs arguments Assign For Call Assign BoolOp Call Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "names", + "source_code": "@property\ndef names(self) -> list[str] | None:\n return None", + "docstring": "Ordered list of field names, or None if there are no fields. This is for compatibility with NumPy arrays, and may be removed in the future.", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\base.py", + "ast_data": "FunctionDef name:names arg:self arguments arg Return return:no" + }, + { + "library": "pytorch", + "name": "unsqueeze", + "source_code": "@_onnx_symbolic('aten::unsqueeze')\n@symbolic_helper.parse_args('v', 'i')\ndef unsqueeze(g: jit_utils.GraphContext, self, dim):\n if dim < 0:\n rank = symbolic_helper._get_tensor_rank(self)\n if rank is not None:\n warnings.warn('ONNX export unsqueeze with negative axis ' + str(dim) + ' might cause the onnx model to be incorrect. ' + 'Negative axis is not supported in ONNX. ' + 'Axis is converted to ' + str(dim + rank + 1) + ' based on input shape at export time. ' + 'Passing an tensor of different rank in execution will be incorrect.')\n dim = dim + rank + 1\n else:\n return symbolic_helper._unimplemented('unsqueeze', 'negative axis with unknown input rank', self)\n return symbolic_helper._unsqueeze_helper(g, self, axes_i=[dim])", + "docstring": "Implement unsqueezing a pytorch tensor in ONNX by inserting a new dimension at the specified", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py", + "ast_data": "FunctionDef name:unsqueeze arg:g arg:self arg:dim arguments arg arg arg If Compare Assign Call If Compare Call Call Call Assign Return return:yes Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "create_local_plan", + "source_code": "@abc.abstractmethod\ndef create_local_plan(self) -> SavePlan:\n pass", + "docstring": "Compute the save plan for the current rank. This will be aggregated and passed to create_global_plan. Planner specific data can be passed through SavePlan::planner_data. 
This is called on all ranks.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py", + "ast_data": "FunctionDef name:create_local_plan arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "argmin", + "source_code": "@_apply_docstring_templates\ndef argmin(input: Union[Tensor, MaskedTensor], dim: Optional[int]=None, *, keepdim: Optional[bool]=False, dtype: Optional[DType]=None, mask: Optional[Tensor]=None) -> Tensor:\n if dtype is None:\n dtype = input.dtype\n mask_input = _combine_input_and_mask(argmin, input, mask)\n if mask_input.layout == torch.strided:\n return torch.argmin(mask_input, dim, bool(keepdim)).to(dtype=dtype)\n else:\n raise ValueError(f'masked argmin expects strided tensor (got {mask_input.layout} tensor)')", + "docstring": "{reduction_signature} {reduction_descr} {reduction_identity_dtype} {reduction_args} {reduction_example}", + "type": "function", + "file_path": "pytorch\\torch\\masked\\_ops.py", + "ast_data": "FunctionDef name:argmin arg:input arg:dim arguments arg arg arg arg arg If Compare Assign Assign Call If Compare Return return:yes Call Call Call Raise Call" + }, + { + "library": "matplotlib", + "name": "_scalar_vectorized", + "source_code": "def _scalar_vectorized(scalar, M):\n return scalar[:, np.newaxis, np.newaxis] * M", + "docstring": "Scalar product between scalars and matrices.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py", + "ast_data": "FunctionDef name:_scalar_vectorized arg:scalar arg:M arguments arg arg Return return:yes" + }, + { + "library": "kornia", + "name": "RgbToBgr", + "source_code": "class RgbToBgr(Module):\n ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n def forward(self, image: Tensor) -> Tensor:\n return rgb_to_bgr(image)", + "docstring": "Convert an image from RGB to BGR. The image data is assumed to be in the range of (0, 1). Returns: BGR version of the image. 
Shape: - image: :math:`(*, 3, H, W)` - output: :math:`(*, 3, H, W)` Example: >>> input = torch.rand(2, 3, 4, 5) >>> bgr = RgbToBgr() >>> output = bgr(input) # 2x3x4x5", + "type": "class", + "file_path": "kornia\\kornia\\color\\rgb.py", + "ast_data": "ClassDef name:RgbToBgr FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_tick_iterators", + "source_code": "def get_tick_iterators(self, axes):\n v1, v2 = axes.get_ylim() if self.nth_coord == 0 else axes.get_xlim()\n if v1 > v2:\n side = {'left': 'right', 'right': 'left', 'top': 'bottom', 'bottom': 'top'}[self.side]\n else:\n side = self.side\n angle_tangent = dict(left=90, right=90, bottom=0, top=0)[side]\n\n def iter_major():\n for nth_coord, show_labels in [(self.nth_coord_ticks, True), (1 - self.nth_coord_ticks, False)]:\n gi = self.grid_helper._grid_info[['lon', 'lat'][nth_coord]]\n for tick in gi['ticks'][side]:\n yield (*tick['loc'], angle_tangent, tick['label'] if show_labels else '')\n return (iter_major(), iter([]))", + "docstring": "tick_loc, tick_angle, tick_label", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_helper_curvelinear.py", + "ast_data": "FunctionDef name:get_tick_iterators arg:self arg:axes arguments arg arg Assign Compare Call Call If Compare Assign Assign Assign Call FunctionDef name:iter_major arguments For Assign For Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_get_rng_state_offset", + "source_code": "def _get_rng_state_offset(device: Union[int, str, torch.device]='cuda') -> int:\n _lazy_init()\n final_device = _get_device(device)\n default_generator = _get_generator(final_device)\n return default_generator.get_offset()", + "docstring": "Return the random number generator state offset of the specified GPU. Args: device (torch.device or int, optional): The device to return the RNG state offset of. Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device). .. warning:: This function eagerly initializes CUDA.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:_get_rng_state_offset arg:device arguments arg Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_validate_estimator", + "source_code": "def _validate_estimator(self):\n super()._validate_estimator(default=DecisionTreeClassifier(max_depth=1))\n if self.algorithm != 'deprecated':\n warnings.warn(\"The parameter 'algorithm' is deprecated in 1.6 and has no effect. It will be removed in version 1.8.\", FutureWarning)\n if not has_fit_parameter(self.estimator_, 'sample_weight'):\n raise ValueError(f\"{self.estimator.__class__.__name__} doesn't support sample_weight.\")", + "docstring": "Check the estimator and set the estimator_ attribute.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py", + "ast_data": "FunctionDef name:_validate_estimator arg:self arguments arg Call Call Call If Compare Call If Call Raise Call" + }, + { + "library": "numpy", + "name": "lagadd", + "source_code": "def lagadd(c1, c2):\n return pu._add(c1, c2)", + "docstring": "Add one Laguerre series to another. Returns the sum of two Laguerre series c1 + c2. The arguments are sequences of coefficients ordered from lowest order term to highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Laguerre series coefficients ordered from low to high. 
Returns ------- out : ndarray Array representing the Laguerre series of their sum. See Also -------- lagsub, lagmulx, lagmul, lagdiv, lagpow Notes ----- Unlike multiplication, division, etc., the sum of two Laguerre series is a Laguerre series (without having to \"reproject\" the result onto the basis set) so addition, just like that of \"standard\" polynomials, is simply \"component-wise.\" Examples -------- >>> from numpy.polynomial.laguerre import lagadd >>> lagadd([1, 2, 3], [1, 2, 3, 4]) array([2., 4., 6., 4.])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\laguerre.py", + "ast_data": "FunctionDef name:lagadd arg:c1 arg:c2 arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "is_dim_div_by_target", + "source_code": "def is_dim_div_by_target(target: list[int], dim: list[DVar]):\n return BinConstraintD(BinConstraintD(dim, Prod(target), op_mod), 0, op_eq)", + "docstring": "Generate constraints to check if the input dimensions is divisible by the target dimensions Args: target: Target dimensions dim: Input dimensions Returns: Constraints to check divisibility", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", + "ast_data": "FunctionDef name:is_dim_div_by_target arg:target arg:dim arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "join_phase", + "source_code": "def join_phase(self, expected_version):\n active_version, this_rank = self.join_rendezvous(expected_version)\n state = json.loads(active_version.value)\n logger.info('Joined rendezvous version %s as rank %s. Full state: %s', state['version'], this_rank, state)\n if this_rank == self._num_min_workers - 1 and state['status'] == 'joinable':\n logger.info('Rank %s is responsible for join last call.', this_rank)\n last_call_deadline = time.time() + self._last_call_timeout\n self.handle_join_last_call(expected_version, last_call_deadline)\n logger.info('Rank %s finished join last call.', this_rank)\n logger.info('Waiting for remaining peers.')\n active_version = self.wait_for_peers(expected_version)\n state = json.loads(active_version.value)\n assert state['version'] == expected_version, 'Logic error: failed to observe version mismatch'\n return self.confirm_phase(expected_version, this_rank)", + "docstring": "We observed a rendezvous state in 'joinable' state, and attempt to join this particular version, and then wait for all other peers to join.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_rendezvous.py", + "ast_data": "FunctionDef name:join_phase arg:self arg:expected_version arguments arg arg Assign Call Assign Call Call If BoolOp Compare Compare Call Assign Call Call Call Call Assign Call Assign Call Compare Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_view_interval", + "source_code": "def get_view_interval(self):\n raise NotImplementedError('Derived must override')", + "docstring": "Return the view limits ``(min, max)`` of the axis the tick belongs to.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_view_interval arg:self arguments arg Raise Call" + }, + { + "library": "kornia", + "name": "transform_output_tensor", + "source_code": "def transform_output_tensor(self, output: Tensor, output_shape: Tuple[int, ...]) -> Tensor:\n return _transform_output_shape(output, output_shape) if self.keepdim else output", + "docstring": "Standardize 
output tensors.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\base.py", + "ast_data": "FunctionDef name:transform_output_tensor arg:self arg:output arg:output_shape arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "make_parse_example_spec", + "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export(v1=['feature_column.make_parse_example_spec'])\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef make_parse_example_spec(feature_columns):\n result = {}\n for column in feature_columns:\n if not isinstance(column, _FeatureColumn):\n raise ValueError('All feature_columns must be _FeatureColumn instances. Given: {}'.format(column))\n config = column._parse_example_spec\n for key, value in six.iteritems(config):\n if key in result and value != result[key]:\n raise ValueError('feature_columns contain different parse_spec for key {}. Given {} and {}'.format(key, value, result[key]))\n result.update(config)\n return result", + "docstring": "Creates parsing spec dictionary from input feature_columns. The returned dictionary can be used as arg 'features' in . Typical usage example: For the above example, make_parse_example_spec would return the dict: Args: feature_columns: An iterable containing all feature columns. All items should be instances of classes derived from . Returns: A dict mapping each feature key to a or value. Raises: ValueError: If any of the given is not a instance.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:make_parse_example_spec arg:feature_columns arguments arg Assign For If Call Raise Call Call Assign For Call If BoolOp Compare Compare Raise Call Call Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "sparse_segment_mean_v2", + "source_code": "@tf_export('sparse.segment_mean', v1=[])\ndef sparse_segment_mean_v2(data, indices, segment_ids, num_segments=None, name=None, sparse_gradient=False):\n return sparse_segment_mean(data, indices, segment_ids, name=name, num_segments=num_segments, sparse_gradient=sparse_gradient)", + "docstring": "Computes the mean along sparse segments of a tensor. Read [the section on segmentation]( for an explanation of segments. Like , but can have rank less than 's first dimension, selecting a subset of dimension 0, specified by . is allowed to have missing ids, in which case the output will be zeros at those indices. In those cases is used to determine the size of the output. Args: data: A with data that will be assembled in the output. indices: A 1-D with indices into . Has same rank as . segment_ids: A 1-D with indices into the output . Values should be sorted and can be repeated. num_segments: An optional int32 scalar. Indicates the size of the output . name: A name for the operation (optional). sparse_gradient: An optional . Defaults to . If , the gradient of this function will be sparse () instead of dense (). The sparse gradient will contain one non-zero row for each unique index in . 
Returns: A of the shape as data, except for dimension 0 which has size , the number of segments specified via or inferred for the last element in .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:sparse_segment_mean_v2 arg:data arg:indices arg:segment_ids arg:num_segments arg:name arg:sparse_gradient arguments arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_ensure_servable", + "source_code": "def _ensure_servable(input_tensors, names_to_output_tensor_infos):\n plain_input_tensors = nest.flatten(input_tensors, expand_composites=True)\n graph = op_selector.get_unique_graph(plain_input_tensors)\n output_tensors = [utils.get_tensor_from_tensor_info(tensor, graph=graph) for tensor in names_to_output_tensor_infos.values()]\n plain_output_tensors = nest.flatten(output_tensors, expand_composites=True)\n dependency_ops = op_selector.get_backward_walk_ops(plain_output_tensors, stop_at_ts=plain_input_tensors)\n fed_tensors = object_identity.ObjectIdentitySet(plain_input_tensors)\n for dependency_op in dependency_ops:\n if _must_be_fed(dependency_op) and (not all((output in fed_tensors for output in dependency_op.outputs))):\n input_tensor_names = [tensor.name for tensor in plain_input_tensors]\n output_tensor_keys = list(names_to_output_tensor_infos.keys())\n output_tensor_names = [tensor.name for tensor in plain_output_tensors]\n dependency_path = op_selector.show_path(dependency_op, plain_output_tensors, plain_input_tensors)\n raise ValueError(f\"The signature's input tensors {input_tensor_names} are insufficient to compute its output keys {output_tensor_keys} (respectively, tensors {output_tensor_names}) because of the dependency on `{dependency_op.name}` which is not given as a signature input, as illustrated by the following dependency path: {dependency_path}\")", + "docstring": "Check that the signature outputs don't depend on unreachable placeholders. Args: input_tensors: An iterable of s specified as the signature's inputs. names_to_output_tensor_infos: An mapping from output names to respective s corresponding to the signature's output tensors. 
Raises: ValueError: If any of the signature's outputs depend on placeholders not provided as signature's inputs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_utils.py", + "ast_data": "FunctionDef name:_ensure_servable arg:input_tensors arg:names_to_output_tensor_infos arguments arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call For If BoolOp Call Call Compare Assign Assign Call Call Assign Assign Call Raise Call" + }, + { + "library": "kornia", + "name": "to_color_space", + "source_code": "def to_color_space(self, color_space: ColorSpace) -> Image:\n raise NotImplementedError", + "docstring": "Convert the image to a different color space.", + "type": "method", + "file_path": "kornia\\kornia\\image\\image.py", + "ast_data": "FunctionDef name:to_color_space arg:self arg:color_space arguments arg arg Raise" + }, + { + "library": "django", + "name": "__deepcopy__", + "source_code": "def __deepcopy__(self, memo):\n result = self.clone()\n memo[id(self)] = result\n return result", + "docstring": "Limit the amount of work when a Query is deepcopied.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\query.py", + "ast_data": "FunctionDef name:__deepcopy__ arg:self arg:memo arguments arg arg Assign Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "depth_warp", + "source_code": "def depth_warp(pinhole_dst: PinholeCamera, pinhole_src: PinholeCamera, depth_src: Tensor, patch_dst: Tensor, height: int, width: int, align_corners: bool=True) -> Tensor:\n warper = DepthWarper(pinhole_dst, height, width, align_corners=align_corners)\n warper.compute_projection_matrix(pinhole_src)\n return warper(depth_src, patch_dst)", + "docstring": "Warp a tensor from destination frame to reference given the depth in the reference frame. See :class: for details. Example: >>> # pinholes camera models >>> pinhole_dst = PinholeCamera(torch.randn(1, 4, 4), torch.randn(1, 4, 4), ... torch.tensor([32]), torch.tensor([32])) >>> pinhole_src = PinholeCamera(torch.randn(1, 4, 4), torch.randn(1, 4, 4), ... 
torch.tensor([32]), torch.tensor([32])) >>> # warp the destination frame to reference by depth >>> depth_src = torch.ones(1, 1, 32, 32) # Nx1xHxW >>> image_dst = torch.rand(1, 3, 32, 32) # NxCxHxW >>> image_src = depth_warp(pinhole_dst, pinhole_src, depth_src, image_dst, 32, 32) # NxCxHxW", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\depth.py", + "ast_data": "FunctionDef name:depth_warp arg:pinhole_dst arg:pinhole_src arg:depth_src arg:patch_dst arg:height arg:width arg:align_corners arguments arg arg arg arg arg arg arg Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "ensure_uninitialized", + "source_code": "def ensure_uninitialized(self):\n with self._initialize_lock:\n if not self._initialized:\n return\n self._context_devices = None\n self._logical_devices = None\n self._server_def = None\n self._initialized = False\n if self._is_global_context:\n pywrap_tfe.TFE_Py_SetCEagerContext(None)\n self._context_handle = None", + "docstring": "Uninitialize handle and devices if not already done so.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:ensure_uninitialized arg:self arguments arg With If Return return:no Assign Assign Assign Assign If Call Assign" + }, + { + "library": "kornia", + "name": "RegularRenderer", + "source_code": "class RegularRenderer(VolumeRenderer):\n\n def forward(self, rgbs: Tensor, densities: Tensor, points_3d: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(rgbs, ['*', 'N', '3'])\n KORNIA_CHECK_SHAPE(densities, ['*', 'N'])\n KORNIA_CHECK_SHAPE(points_3d, ['*', 'N', '3'])\n num_ray_points: int = points_3d.shape[-2]\n points_3d = points_3d.reshape(-1, num_ray_points, 3)\n delta_3d = points_3d[0, 1, :] - points_3d[0, 0, :]\n delta = torch.linalg.norm(delta_3d, dim=-1)\n alpha = 1 - torch.exp(-1.0 * densities * delta)\n return self._render(alpha, rgbs)", + "docstring": "Renders 3D regularly sampled points along rays.", + "type": "class", + "file_path": "kornia\\kornia\\nerf\\volume_renderer.py", + "ast_data": "ClassDef name:RegularRenderer FunctionDef name:forward arg:self arg:rgbs arg:densities arg:points_3d arguments arg arg arg arg Call Call Call Assign Call Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "__getitem__", + "source_code": "def __getitem__(self, index):\n if 0 <= index < self.geom_count:\n return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)\n else:\n raise IndexError('Index out of range when accessing geometry in a collection: %s.' 
% index)", + "docstring": "Get the Geometry at the specified index.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:__getitem__ arg:self arg:index arguments arg arg If Compare Return return:yes Call Call Call Raise Call" + }, + { + "library": "sphinx", + "name": "textwidth", + "source_code": "def textwidth(text: str, widechars: str='WF') -> int:\n\n def charwidth(char: str, widechars: str) -> int:\n if east_asian_width(char) in widechars:\n return 2\n else:\n return 1\n return sum((charwidth(c, widechars) for c in text))", + "docstring": "Get width of text.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\rst.py", + "ast_data": "FunctionDef name:textwidth arg:text arg:widechars arguments arg arg FunctionDef name:charwidth arg:char arg:widechars arguments arg arg If Compare Call Return return:yes Return return:yes Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "dtype", + "source_code": "@property\ndef dtype(self):\n return self.gather_index.dtype", + "docstring": "Returns the dtype of the broadcast.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "__getattr__", + "source_code": "def __getattr__(self, attr):\n if attr != 'meta' and attr in self.meta:\n return self.meta[attr]\n else:\n raise AttributeError(f\"'{attr}' not in metadata\")", + "docstring": "Dispatch attribute access to the metadata.", + "type": "method", + "file_path": "scipy\\scipy\\odr\\_odrpack.py", + "ast_data": "FunctionDef name:__getattr__ arg:self arg:attr arguments arg arg If BoolOp Compare Compare Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "_validate_device", + "source_code": "def _validate_device(location, backend_name):\n if not hasattr(torch, backend_name):\n raise RuntimeError(f\"The {backend_name.upper()} device module is not registered. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.\")\n device_module = getattr(torch, backend_name)\n if hasattr(device_module, '_utils') and hasattr(device_module._utils, '_get_device_index'):\n device_index = device_module._utils._get_device_index(location, True)\n device = torch.device(backend_name, device_index)\n else:\n device = torch.device(location)\n device_index = device.index if device.index else 0\n if hasattr(device_module, 'is_available') and (not device_module.is_available()):\n raise RuntimeError(f\"Attempting to deserialize object on a {backend_name.upper()} device but torch.{backend_name}.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.\")\n if hasattr(device_module, 'device_count'):\n device_count = device_module.device_count()\n if device_index >= device_count:\n raise RuntimeError(f'Attempting to deserialize object on {backend_name.upper()} device {device_index} but torch.{backend_name}.device_count() is {device_count}. Please use torch.load with map_location to map your storages to an existing device.')\n return device", + "docstring": "Check whether the device index of specified backend is valid In case of privateuse1 backend, your must first register a device_module for privateuse1 using torch._register_device_module. 
Implement the following methods in device_module like cuda: device_module._utils._get_device_index(location, True), device_module.device_count(). Args: location: string of device backend_name: the backend name or the name of privateuse1, which can be renamed Returns: device_index: int", + "type": "function", + "file_path": "pytorch\\torch\\serialization.py", + "ast_data": "FunctionDef name:_validate_device arg:location arg:backend_name arguments arg arg If Call Raise Call Call Assign Call If BoolOp Call Call Assign Call Assign Call Assign Call Assign If BoolOp Call Call Raise Call Call If Call Assign Call If Compare Raise Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "isf", + "source_code": "def isf(self, q, *args, **kwds):\n args, loc, scale = self._parse_args(*args, **kwds)\n q, loc, scale = map(asarray, (q, loc, scale))\n args = tuple(map(asarray, args))\n _a, _b = self._get_support(*args)\n cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n cond1 = (0 < q) & (q < 1)\n cond2 = cond0 & (q == 1)\n cond3 = cond0 & (q == 0)\n cond = cond0 & cond1\n output = np.full(shape(cond), fill_value=self.badvalue)\n lower_bound = _a * scale + loc\n upper_bound = _b * scale + loc\n place(output, cond2, argsreduce(cond2, lower_bound)[0])\n place(output, cond3, argsreduce(cond3, upper_bound)[0])\n if np.any(cond):\n goodargs = argsreduce(cond, *(q,) + args + (scale, loc))\n scale, loc, goodargs = (goodargs[-2], goodargs[-1], goodargs[:-2])\n place(output, cond, self._isf(*goodargs) * scale + loc)\n if output.ndim == 0:\n return output[()]\n return output", + "docstring": "Inverse survival function (inverse of ) at q of the given RV. Parameters ---------- q : array_like upper tail probability arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- x : ndarray or scalar Quantile corresponding to the upper tail probability q.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:isf arg:self arg:q arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Compare Compare Assign Compare Compare Assign Compare Assign Compare Assign Assign Call Call Assign Assign Call Call Call Call If Call Assign Call Assign Call Call If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "to_function_type", + "source_code": "def to_function_type(fullargspec):\n default_values = _to_default_values(fullargspec)\n parameters = []\n for arg in fullargspec.args:\n arg_name = function_type_lib.sanitize_arg_name(arg)\n parameters.append(function_type_lib.Parameter(arg_name, function_type_lib.Parameter.POSITIONAL_OR_KEYWORD, arg_name in default_values, None))\n if fullargspec.varargs is not None:\n parameters.append(function_type_lib.Parameter(fullargspec.varargs, function_type_lib.Parameter.VAR_POSITIONAL, False, None))\n for kwarg in fullargspec.kwonlyargs:\n parameters.append(function_type_lib.Parameter(function_type_lib.sanitize_arg_name(kwarg), function_type_lib.Parameter.KEYWORD_ONLY, kwarg in default_values, None))\n if fullargspec.varkw is not None:\n parameters.append(function_type_lib.Parameter(fullargspec.varkw, function_type_lib.Parameter.VAR_KEYWORD, False, None))\n return (function_type_lib.FunctionType(parameters), default_values)", + 
"docstring": "Generates FunctionType and default values from fullargspec.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py", + "ast_data": "FunctionDef name:to_function_type arg:fullargspec arguments arg Assign Call Assign For Assign Call Call Call Compare If Compare Call Call For Call Call Call Compare If Compare Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "__init__", + "source_code": "def __init__(self, file, hb_info=None):\n self._fid = file\n if hb_info is None:\n self._hb_info = HBInfo.from_file(file)\n else:\n self._hb_info = hb_info", + "docstring": "Create a HBFile instance. Parameters ---------- file : file-object StringIO work as well hb_info : HBInfo, optional Should be given as an argument for writing, in which case the file should be writable.", + "type": "method", + "file_path": "scipy\\scipy\\io\\_harwell_boeing\\hb.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:file arg:hb_info arguments arg arg arg Assign If Compare Assign Call Assign" + }, + { + "library": "scipy", + "name": "proc_gpool", + "source_code": "def proc_gpool(self):\n if self.g_cons is not None:\n for v in self.gpool:\n self.feasibility_check(v)\n self.gpool = set()", + "docstring": "Process all constraints.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py", + "ast_data": "FunctionDef name:proc_gpool arg:self arguments arg If Compare For Call Assign Call" + }, + { + "library": "matplotlib", + "name": "get_inner_bbox", + "source_code": "def get_inner_bbox(self, rows=0, cols=0):\n rows = np.atleast_1d(rows)\n cols = np.atleast_1d(cols)\n bbox = Bbox.from_extents(self.lefts[cols[0]].value() + self.margins['left'][cols[0]].value() + self.margins['leftcb'][cols[0]].value(), self.bottoms[rows[-1]].value() + self.margins['bottom'][rows[-1]].value() + self.margins['bottomcb'][rows[-1]].value(), self.rights[cols[-1]].value() - self.margins['right'][cols[-1]].value() - self.margins['rightcb'][cols[-1]].value(), self.tops[rows[0]].value() - self.margins['top'][rows[0]].value() - self.margins['topcb'][rows[0]].value())\n return bbox", + "docstring": "Return the inner bounding box of the subplot specs given by rows and cols. rows and cols can be spans.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py", + "ast_data": "FunctionDef name:get_inner_bbox arg:self arg:rows arg:cols arguments arg arg arg Assign Call Assign Call Assign Call Call Call Call Call Call Call Call Call Call Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "json_script", + "source_code": "def json_script(value, element_id=None, encoder=None):\n from django.core.serializers.json import DjangoJSONEncoder\n json_str = json.dumps(value, cls=encoder or DjangoJSONEncoder).translate(_json_script_escapes)\n if element_id:\n template = ''\n args = (element_id, mark_safe(json_str))\n else:\n template = ''\n args = (mark_safe(json_str),)\n return format_html(template, *args)", + "docstring": "Escape all the HTML/XML special characters with their unicode escapes, so value is safe to be output anywhere except for inside a tag attribute. 
Wrap the escaped JSON in a script tag.", + "type": "function", + "file_path": "django\\django\\utils\\html.py", + "ast_data": "FunctionDef name:json_script arg:value arg:element_id arg:encoder arguments arg arg arg Assign Call Call BoolOp If Assign Assign Call Assign Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "clear_tmp", + "source_code": "def clear_tmp():\n path = Path(__file__).resolve().parent / 'cache' / 'tmp'\n for child in path.iterdir():\n child.unlink()", + "docstring": "Clean the tmp directory", + "type": "function", + "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\common.py", + "ast_data": "FunctionDef name:clear_tmp arguments Assign Call Call For Call Call" + }, + { + "library": "pandas", + "name": "is_in_table", + "source_code": "@property\ndef is_in_table(self) -> bool:\n return self.queryables.get(self.lhs) is not None", + "docstring": "return True if this is a valid column name for generation (e.g. an actual column in the table)", + "type": "method", + "file_path": "pandas\\pandas\\core\\computation\\pytables.py", + "ast_data": "FunctionDef name:is_in_table arg:self arguments arg Return return:yes Compare Call" + }, + { + "library": "pandas", + "name": "dropna", + "source_code": "def dropna(self) -> Self:\n return self[~self.isna()]", + "docstring": "Return ExtensionArray without NA values. Returns ------- Self An ExtensionArray of the same type as the original but with all NA values removed. See Also -------- Series.dropna : Remove missing values from a Series. DataFrame.dropna : Remove missing values from a DataFrame. api.extensions.ExtensionArray.isna : Check for missing values in an ExtensionArray. Examples -------- >>> pd.array([1, 2, np.nan]).dropna() [1, 2] Length: 2, dtype: Int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\base.py", + "ast_data": "FunctionDef name:dropna arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "add_var_to_val", + "source_code": "def add_var_to_val(self, expr: sympy.Symbol, val: int) -> None:\n log.debug('add_var_to_val %s %s', expr, val, stack_info=True)\n assert expr not in self.var_to_val, f'{expr} already exists'\n self.var_to_val[expr] = sympy.Integer(val)", + "docstring": "Adds a new symbol to the symbolic environment.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:add_var_to_val arg:self arg:expr arg:val arguments arg arg arg Call Compare Assign Call" + }, + { + "library": "django", + "name": "metadata", + "source_code": "@metadata.setter\ndef metadata(self, value):\n for domain, metadata in value.items():\n domain = None if domain == 'DEFAULT' else domain.encode()\n for meta_name, meta_value in metadata.items():\n capi.set_ds_metadata_item(self._ptr, meta_name.encode(), meta_value.encode() if meta_value else None, domain)", + "docstring": "Set the metadata. 
Update only the domains that are contained in the value dictionary.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\base.py", + "ast_data": "FunctionDef name:metadata arg:self arg:value arguments arg arg For Call Assign Compare Call For Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "export_meta_graph", + "source_code": "def export_meta_graph(self, filename=None, collection_list=None, as_text=False, export_scope=None, clear_devices=False, clear_extraneous_savers=False, strip_default_attrs=False, save_debug_info=False):\n return export_meta_graph(filename=filename, graph_def=ops.get_default_graph().as_graph_def(add_shapes=True, use_pybind11_proto=True), saver_def=self.saver_def, collection_list=collection_list, as_text=as_text, export_scope=export_scope, clear_devices=clear_devices, clear_extraneous_savers=clear_extraneous_savers, strip_default_attrs=strip_default_attrs, save_debug_info=save_debug_info)", + "docstring": "Writes to save_path/filename. Args: filename: Optional meta_graph filename including the path. collection_list: List of string keys to collect. as_text: If , writes the meta_graph as an ASCII proto. export_scope: Optional . Name scope to remove. clear_devices: Whether or not to clear the device field for an or during export. clear_extraneous_savers: Remove any Saver-related information from the graph (both Save/Restore ops and SaverDefs) that are not associated with this Saver. strip_default_attrs: Boolean. If , default-valued attributes will be removed from the NodeDefs. For a detailed guide, see [Stripping Default-Valued Attributes]( save_debug_info: If , save the GraphDebugInfo to a separate file, which in the same directory of filename and with added before the file extension. Returns: A proto.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", + "ast_data": "FunctionDef name:export_meta_graph arg:self arg:filename arg:collection_list arg:as_text arg:export_scope arg:clear_devices arg:clear_extraneous_savers arg:strip_default_attrs arg:save_debug_info arguments arg arg arg arg arg arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_static_check", + "source_code": "def _static_check(self):\n my_dtype = self.dtype\n if self._uniform_row_length is not None:\n if self._uniform_row_length.dtype != my_dtype:\n raise ValueError('_uniform_row_length.dtype=' + str(self._uniform_row_length.dtype) + ', not ' + str(my_dtype))\n if self._row_lengths is not None and self._row_lengths.dtype != my_dtype:\n raise ValueError('_row_lengths.dtype=' + str(self._row_lengths.dtype) + ', not ' + str(my_dtype))\n if self._value_rowids is not None and self._value_rowids.dtype != my_dtype:\n raise ValueError('_value_rowids.dtype=' + str(self._value_rowids.dtype) + ', not ' + str(my_dtype))\n if self._nrows is not None and self._nrows.dtype != my_dtype:\n raise ValueError('_nrows.dtype=' + str(self._nrows.dtype) + ', not ' + str(my_dtype))", + "docstring": "Checks if the object is internally consistent. 
Raises: ValueError if inconsistent.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:_static_check arg:self arguments arg Assign If Compare If Compare Raise Call Call Call If BoolOp Compare Compare Raise Call Call Call If BoolOp Compare Compare Raise Call Call Call If BoolOp Compare Compare Raise Call Call Call" + }, + { + "library": "tensorflow", + "name": "as_list", + "source_code": "def as_list(self):\n return self._flattened_inputs", + "docstring": "Returning the inputs as a list.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "FunctionDef name:as_list arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_mapped_registered_save_fn", + "source_code": "def _get_mapped_registered_save_fn(fn: Callable[..., tensor_lib.Tensor], trackables: Sequence[base.Trackable], call_with_mapped_captures: MappedCapturesCallable) -> Callable[[tensor_lib.Tensor], MappedCapturesCallable]:\n\n def save_fn(file_prefix: tensor_lib.Tensor) -> tensor_lib.Tensor:\n return fn(trackables=trackables, file_prefix=file_prefix)\n if call_with_mapped_captures is None:\n return save_fn\n else:\n tf_fn = def_function.function(save_fn, autograph=False)\n concrete = tf_fn.get_concrete_function(file_prefix=tensor_spec.TensorSpec(shape=(), dtype=dtypes.string))\n\n def save_fn_with_replaced_captures(file_prefix: tensor_lib.Tensor) -> tensor_lib.Tensor:\n return call_with_mapped_captures(concrete, [file_prefix])\n return save_fn_with_replaced_captures", + "docstring": "Converts the function to a python or tf.function with a single file arg.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\functional_saver.py", + "ast_data": "FunctionDef name:_get_mapped_registered_save_fn arg:fn arg:trackables arg:call_with_mapped_captures arguments arg arg arg FunctionDef name:save_fn arg:file_prefix arguments arg Return return:yes Call If Compare Return return:yes Assign Call Assign Call Call FunctionDef name:save_fn_with_replaced_captures arg:file_prefix arguments arg Return return:yes Call Return return:yes" + }, + { + "library": "django", + "name": "verbatim", + "source_code": "@register.tag\ndef verbatim(parser, token):\n nodelist = parser.parse(('endverbatim',))\n parser.delete_first_token()\n return VerbatimNode(nodelist.render(Context()))", + "docstring": "Stop the template engine from rendering the contents of this block tag. Usage:: {% verbatim %} {% don't process this %} {% endverbatim %} You can also designate a specific closing tag block (allowing the unrendered use of ``):: {% verbatim myblock %} ... 
{% endverbatim myblock %}", + "type": "function", + "file_path": "django\\django\\template\\defaulttags.py", + "ast_data": "FunctionDef name:verbatim arg:parser arg:token arguments arg arg Assign Call Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "efficient_conv_bn_eval", + "source_code": "def efficient_conv_bn_eval(bn: nn.modules.batchnorm._BatchNorm, conv: nn.modules.conv._ConvNd, x: torch.Tensor):\n assert bn.running_var is not None\n assert bn.running_mean is not None\n weight_on_the_fly = conv.weight\n if conv.bias is not None:\n bias_on_the_fly = conv.bias\n else:\n bias_on_the_fly = torch.zeros_like(bn.running_var)\n if bn.weight is not None:\n bn_weight = bn.weight\n else:\n bn_weight = torch.ones_like(bn.running_var)\n if bn.bias is not None:\n bn_bias = bn.bias\n else:\n bn_bias = torch.zeros_like(bn.running_var)\n target_shape = [-1] + [1] * (conv.weight.ndim - 1)\n if isinstance(conv, nn.modules.conv._ConvTransposeNd):\n target_shape[:2] = [target_shape[1], target_shape[0]]\n weight_coeff = torch.rsqrt(bn.running_var + bn.eps).reshape(target_shape)\n coefff_on_the_fly = bn_weight.view_as(weight_coeff) * weight_coeff\n weight_on_the_fly = weight_on_the_fly * coefff_on_the_fly\n bias_on_the_fly = bn_bias + coefff_on_the_fly.flatten() * (bias_on_the_fly - bn.running_mean)\n input = x\n params = {'weight': weight_on_the_fly, 'bias': bias_on_the_fly}\n output = functional_call(conv, params, input)\n return output", + "docstring": "Implementation based on \"Efficient ConvBN Blocks for Transfer Learning and Beyond\" It leverages the associative law between convolution and affine transform, i.e., normalize (weight conv feature) = (normalize weight) conv feature. It works for Eval mode of ConvBN blocks during validation, and can be used for **training** as well, but only if one sets . It reduces memory footprint and computation cost, at the cost of slightly reduced numerical stability. Args: bn (nn.modules.batchnorm._BatchNorm): a BatchNorm module. conv (nn.modules.conv._ConvNd): a conv module x (torch.Tensor): Input feature map.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\efficient_conv_bn_eval.py", + "ast_data": "FunctionDef name:efficient_conv_bn_eval arg:bn arg:conv arg:x arguments arg arg arg Compare Compare Assign If Compare Assign Assign Call If Compare Assign Assign Call If Compare Assign Assign Call Assign If Call Assign Assign Call Call Assign Call Assign Assign Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "model_from_json", + "source_code": "def model_from_json(json_string, custom_objects=None):\n config = json_utils.decode(json_string)\n from tensorflow.python.keras.layers import deserialize\n return deserialize(config, custom_objects=custom_objects)", + "docstring": "Parses a JSON model configuration string and returns a model instance. Usage: >>> model = tf.keras.Sequential([ ... tf.keras.layers.Dense(5, input_shape=(3,)), ... tf.keras.layers.Softmax()]) >>> config = model.to_json() >>> loaded_model = tf.keras.models.model_from_json(config) Args: json_string: JSON string encoding a model configuration. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. 
Returns: A Keras model instance (uncompiled).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\model_config.py", + "ast_data": "FunctionDef name:model_from_json arg:json_string arg:custom_objects arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_transform", + "source_code": "def get_transform(self):\n return self._scale.get_transform()", + "docstring": "Return the transform used in the Axis' scale", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_transform arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_box_cox_optimize", + "source_code": "def _box_cox_optimize(self, x):\n mask = np.isnan(x)\n if np.all(mask):\n raise ValueError('Column must not be all nan.')\n _, lmbda = stats.boxcox(x[~mask], lmbda=None)\n return lmbda", + "docstring": "Find and return optimal lambda parameter of the Box-Cox transform by MLE, for observed data x. We here use scipy builtins which uses the brent optimizer.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:_box_cox_optimize arg:self arg:x arguments arg arg Assign Call If Call Raise Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "evaluate_generator", + "source_code": "def evaluate_generator(self, generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0):\n warnings.warn('`Model.evaluate_generator` is deprecated and will be removed in a future version. Please use `Model.evaluate`, which supports generators.')\n self._check_call_args('evaluate_generator')\n return self.evaluate(generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose, callbacks=callbacks)", + "docstring": "Evaluates the model on a data generator. DEPRECATED: Model.evaluate now supports generators, so there is no longer any need to use this endpoint.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py", + "ast_data": "FunctionDef name:evaluate_generator arg:self arg:generator arg:steps arg:callbacks arg:max_queue_size arg:workers arg:use_multiprocessing arg:verbose arguments arg arg arg arg arg arg arg arg Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "dlrm_wrap", + "source_code": "def dlrm_wrap(X, lS_o, lS_i, device, ndevices=1):\n if ndevices == 1:\n lS_i = [S_i.to(device) for S_i in lS_i] if isinstance(lS_i, list) else lS_i.to(device)\n lS_o = [S_o.to(device) for S_o in lS_o] if isinstance(lS_o, list) else lS_o.to(device)\n return (X.to(device), lS_o, lS_i)", + "docstring": "Rewritten simpler version of dlrm_wrap found in dlrm_s_pytorch.py. 
This function simply moves the input tensors into the device and without the forward pass", + "type": "function", + "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\dlrm_utils.py", + "ast_data": "FunctionDef name:dlrm_wrap arg:X arg:lS_o arg:lS_i arg:device arg:ndevices arguments arg arg arg arg arg If Compare Assign Call Call Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "convert_highlight_options", + "source_code": "def convert_highlight_options(app: Sphinx, config: Config) -> None:\n options = config.highlight_options\n if options and (not all((isinstance(v, dict) for v in options.values()))):\n config.highlight_options = {config.highlight_language: options}", + "docstring": "Convert old styled highlight_options to new styled one. * old style: options * new style: a dict which maps from language name to options", + "type": "function", + "file_path": "sphinx\\sphinx\\config.py", + "ast_data": "FunctionDef name:convert_highlight_options arg:app arg:config arguments arg arg Assign If BoolOp Call Call Call Assign" + }, + { + "library": "tensorflow", + "name": "hamming_window", + "source_code": "@tf_export('signal.hamming_window')\n@dispatch.add_dispatch_support\ndef hamming_window(window_length, periodic=True, dtype=dtypes.float32, name=None):\n return _raised_cosine_window(name, 'hamming_window', window_length, periodic, dtype, 0.54, 0.46)", + "docstring": "Generate a [Hamming][hamming] window. Args: window_length: A scalar indicating the window length to generate. periodic: A bool indicating whether to generate a periodic or symmetric window. Periodic windows are typically used for spectral analysis while symmetric windows are typically used for digital filter design. dtype: The data type to produce. Must be a floating point type. name: An optional name for the operation. Returns: A of shape of type . Raises: ValueError: If is not a floating point type. [hamming]:", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\window_ops.py", + "ast_data": "FunctionDef name:hamming_window arg:window_length arg:periodic arg:dtype arg:name arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "get_ordering", + "source_code": "def get_ordering(self, request):\n return self.ordering or ()", + "docstring": "Hook for specifying field ordering.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:get_ordering arg:self arg:request arguments arg arg Return return:yes BoolOp" + }, + { + "library": "seaborn", + "name": "_inverse", + "source_code": "def _inverse(self, values: ArrayLike) -> ArrayLike:\n return values", + "docstring": "Transform applied to results of mapping that returns to native values.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\properties.py", + "ast_data": "FunctionDef name:_inverse arg:self arg:values arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "enable_graph_collection", + "source_code": "def enable_graph_collection(self):\n self.ensure_initialized()\n pywrap_tfe.TFE_ContextEnableGraphCollection(self._handle)", + "docstring": "Enables graph collection of executed functions. 
To retrieve the accumulated graphs call context.export_run_metadata() and to stop collecting graphs call context.disable_graph_collection().", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:enable_graph_collection arg:self arguments arg Call Call" + }, + { + "library": "pytorch", + "name": "unsafe_remove_auto_functionalized_pass", + "source_code": "def unsafe_remove_auto_functionalized_pass(ep: ExportedProgram) -> ExportedProgram:\n with ep.graph_module._set_replace_hook(ep.graph_signature.get_replace_hook()):\n for module in ep.graph_module.modules():\n if not isinstance(module, torch.fx.GraphModule):\n continue\n for node in ep.graph.nodes:\n if node.op == 'call_function' and node.target is auto_functionalized or (node.op == 'call_function' and node.target is auto_functionalized_v2):\n func = node.args[0]\n assert isinstance(func, torch._ops.OpOverload)\n node.meta['only_clone_these_tensors'] = []\n decompose_auto_functionalized(ep.graph)\n remove_self_clone(ep.graph)\n ep.graph.eliminate_dead_code()\n return ep", + "docstring": "This pass removes an instances of the higher order op 'auto_functionalized', and modifies the calling EP inplace to have the original mutator op. This pass doesn't perform safety checks to make sure that this inplace mutation is safe.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_remove_auto_functionalized_pass.py", + "ast_data": "FunctionDef name:unsafe_remove_auto_functionalized_pass arg:ep arguments arg With Call Call For Call If Call For If BoolOp BoolOp Compare Compare BoolOp Compare Compare Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "aot_module", + "source_code": "def aot_module(mod: nn.Module, *args, **kwargs) -> nn.Module:\n torch._dynamo.utils.assert_no_fake_params_or_buffers(mod)\n\n def functional_call(named_params, named_buffers, *args, **kwargs):\n params_and_buffers = {**named_params, **named_buffers}\n return torch.func.functional_call(mod, params_and_buffers, args, kwargs)\n named_params = dict(mod.named_parameters(remove_duplicate=False))\n named_buffers = dict(mod.named_buffers(remove_duplicate=False))\n num_params_buffers = len(named_params) + len(named_buffers)\n compiled_f = aot_function(functional_call, *args, num_params_buffers=num_params_buffers, **kwargs)\n\n class AOTModule(nn.Module):\n\n def __init__(self) -> None:\n super().__init__()\n self.orig_module = mod\n\n def forward(self, *args, **kwargs):\n return compiled_f(named_params, named_buffers, *args, **kwargs)\n return AOTModule()", + "docstring": "Traces the forward and backward graph of :attr: using torch dispatch tracing mechanism. It is wrapper function, that underneath uses :func: to perform tracing and compilation. 
:func: lifts the parameters and buffers of `aot_functionaot_functionaot_functionmod`, but with forward and backward graph compiled.", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\aot_autograd.py", + "ast_data": "FunctionDef name:aot_module arg:mod arguments arg arg arg Call FunctionDef name:functional_call arg:named_params arg:named_buffers arguments arg arg arg arg Assign Return return:yes Call Assign Call Call Assign Call Call Assign Call Call Assign Call ClassDef name:AOTModule FunctionDef name:__init__ arg:self arguments arg Call Call Assign FunctionDef name:forward arg:self arguments arg arg arg Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, callbacks=None, add_history=False, add_progbar=False, model=None, **params):\n self.callbacks = nest.flatten(callbacks) if callbacks else []\n self._add_default_callbacks(add_history, add_progbar)\n if model:\n self.set_model(model)\n if params:\n self.set_params(params)\n self._supports_tf_logs = all((getattr(cb, '_supports_tf_logs', False) for cb in self.callbacks))\n self._batch_hooks_support_tf_logs = all((getattr(cb, '_supports_tf_logs', False) for cb in self.callbacks if cb._implements_train_batch_hooks() or cb._implements_test_batch_hooks() or cb._implements_predict_batch_hooks()))\n self._should_call_train_batch_hooks = any((cb._implements_train_batch_hooks() for cb in self.callbacks))\n self._should_call_test_batch_hooks = any((cb._implements_test_batch_hooks() for cb in self.callbacks))\n self._should_call_predict_batch_hooks = any((cb._implements_predict_batch_hooks() for cb in self.callbacks))\n self._disallow_batch_hooks_in_ps_strategy()\n self._check_timing = any((cbk.__class__.__name__ not in globals() for cbk in self.callbacks))\n self._num_batches_for_timing_check = 5\n self._hook_times = {}\n self._batch_start_time = None\n self._batch_times = []", + "docstring": "Container for instances. This object wraps a list of instances, making it possible to call them all at once via a single endpoint (e.g. ). Args: callbacks: List of instances. add_history: Whether a callback should be added, if one does not already exist in the list. add_progbar: Whether a callback should be added, if one does not already exist in the list. model: The these callbacks are used with. 
**params: If provided, parameters will be passed to each via .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:callbacks arg:add_history arg:add_progbar arg:model arguments arg arg arg arg arg arg Assign Call Call If Call If Call Assign Call Call Assign Call Call BoolOp Call Call Call Assign Call Call Assign Call Call Assign Call Call Call Assign Call Compare Call Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "non_max_suppression_with_overlaps", + "source_code": "@tf_export('image.non_max_suppression_overlaps')\n@dispatch.add_dispatch_support\ndef non_max_suppression_with_overlaps(overlaps, scores, max_output_size, overlap_threshold=0.5, score_threshold=float('-inf'), name=None):\n with ops.name_scope(name, 'non_max_suppression_overlaps'):\n overlap_threshold = ops.convert_to_tensor(overlap_threshold, name='overlap_threshold')\n return gen_image_ops.non_max_suppression_with_overlaps(overlaps, scores, max_output_size, overlap_threshold, score_threshold)", + "docstring": "Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high overlap with previously selected boxes. N-by-n overlap values are supplied as square matrix. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the operation. For example: Args: overlaps: A 2-D float of shape representing the n-by-n box overlap values. scores: A 1-D float of shape representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer representing the maximum number of boxes to be selected by non-max suppression. overlap_threshold: A 0-D float tensor representing the threshold for deciding whether boxes overlap too much with respect to the provided overlap values. score_threshold: A 0-D float tensor representing the threshold for deciding when to remove boxes based on score. name: A name for the operation (optional). 
Returns: selected_indices: A 1-D integer Tensor of shape [M] representing the selected indices from the overlaps tensor, where M <= max_output_size.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py", + "ast_data": "FunctionDef name:non_max_suppression_with_overlaps arg:overlaps arg:scores arg:max_output_size arg:overlap_threshold arg:score_threshold arg:name arguments arg arg arg arg arg arg Call With Call Assign Call Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "OAuth2ClientAuth", + "source_code": "class OAuth2ClientAuth(AuthBase, ClientAuth):\n\n def __call__(self, req):\n req.url, req.headers, req.body = self.prepare(req.method, req.url, req.headers, req.body)\n return req", + "docstring": "Attaches OAuth Client Authentication to the given Request object.", + "type": "class", + "file_path": "authlib\\authlib\\integrations\\requests_client\\oauth2_session.py", + "ast_data": "ClassDef name:OAuth2ClientAuth FunctionDef name:__call__ arg:self arg:req arguments arg arg Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "Problem03", + "source_code": "class Problem03(Benchmark):\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n self._bounds = [(-10, 10)]\n self.global_optimum = -6.7745761\n self.fglob = -12.03124\n\n def fun(self, x, *args):\n self.nfev += 1\n x = x[0]\n y = 0.0\n for k in range(1, 6):\n y += k * sin((k + 1) * x + k)\n return -y", + "docstring": "Univariate Problem03 objective function. This class defines the Univariate Problem03 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem03}}(x) = - \\sum_{k=1}^6 k \\sin[(k+1)x+k] Bound constraints: :math:`x \\in [-10, 10]` .. figure:: figures/Problem03.png :alt: Univariate Problem03 function :align: center **Univariate Problem03 function** *Global optimum*: :math:`f(x) = -12.03124` for :math:`x = -6.7745761`", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py", + "ast_data": "ClassDef name:Problem03 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign For Call Call Return return:yes" + }, + { + "library": "django", + "name": "_check_id_field", + "source_code": "@classmethod\ndef _check_id_field(cls):\n fields = [f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk]\n if fields and (not fields[0].primary_key) and (cls._meta.pk.name == 'id'):\n return [checks.Error(\"'id' can only be used as a field name if the field also sets 'primary_key=True'.\", obj=cls, id='models.E004')]\n else:\n return []", + "docstring": "Check if id field is a primary key.", + "type": "method", + "file_path": "django\\django\\db\\models\\base.py", + "ast_data": "FunctionDef name:_check_id_field arg:cls arguments arg Assign BoolOp Compare Compare If BoolOp Compare Return return:yes Call Return return:no" + }, + { + "library": "scipy", + "name": "sqeuclidean", + "source_code": "def sqeuclidean(u, v, w=None):\n utype, vtype = (None, None)\n if not (hasattr(u, 'dtype') and np.issubdtype(u.dtype, np.inexact)):\n utype = np.float64\n if not (hasattr(v, 'dtype') and np.issubdtype(v.dtype, np.inexact)):\n vtype = np.float64\n u = _validate_vector(u, dtype=utype)\n v = _validate_vector(v, dtype=vtype)\n u_v = u - v\n u_v_w = u_v\n if w is not None:\n w = _validate_weights(w)\n u_v_w = w * u_v\n return np.dot(u_v, u_v_w)", + "docstring": "Compute the squared Euclidean 
distance between two 1-D arrays. The squared Euclidean distance between and is defined as .. math:: \\sum_i{w_i |u_i - v_i|^2} Parameters ---------- u : (N,) array_like Input array. v : (N,) array_like Input array. w : (N,) array_like, optional The weights for each value in and . Default is None, which gives each value a weight of 1.0 Returns ------- sqeuclidean : double The squared Euclidean distance between vectors and . Examples -------- >>> from scipy.spatial import distance >>> distance.sqeuclidean([1, 0, 0], [0, 1, 0]) 2.0 >>> distance.sqeuclidean([1, 1, 0], [0, 1, 0]) 1.0", + "type": "function", + "file_path": "scipy\\scipy\\spatial\\distance.py", + "ast_data": "FunctionDef name:sqeuclidean arg:u arg:v arg:w arguments arg arg arg Assign If BoolOp Call Call Assign If BoolOp Call Call Assign Assign Call Assign Call Assign Assign If Compare Assign Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_check_minimal_version", + "source_code": "def _check_minimal_version(compiler_version: TorchVersion) -> None:\n min_version = '2024.2.1' if _IS_WINDOWS else '0.0.0'\n if compiler_version < TorchVersion(min_version):\n raise RuntimeError(f'Intel Compiler error: less than minimal version {min_version}.')", + "docstring": "On Windows: early version icx has issue, and can't preload correctly for inductor.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\cpp_builder.py", + "ast_data": "FunctionDef name:_check_minimal_version arg:compiler_version arguments arg Assign If Compare Call Raise Call" + }, + { + "library": "matplotlib", + "name": "new_figure_manager_given_figure", + "source_code": "@classmethod\ndef new_figure_manager_given_figure(cls, num, figure):\n return cls.FigureCanvas.new_manager(figure, num)", + "docstring": "Create a new figure manager instance for the given figure.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:new_figure_manager_given_figure arg:cls arg:num arg:figure arguments arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "identity", + "source_code": "def identity(self):\n return self.parent()", + "docstring": "See TensorArray.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:identity arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_timestamped_export_dir", + "source_code": "def get_timestamped_export_dir(export_dir_base):\n attempts = 0\n while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:\n timestamp = int(time.time())\n result_dir = os.path.join(compat.as_bytes(export_dir_base), compat.as_bytes(str(timestamp)))\n if not gfile.Exists(result_dir):\n return result_dir\n time.sleep(1)\n attempts += 1\n logging.warning('Directory {} already exists; retrying (attempt {}/{})'.format(compat.as_str(result_dir), attempts, MAX_DIRECTORY_CREATION_ATTEMPTS))\n raise RuntimeError('Failed to obtain a unique export directory name after {} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS))", + "docstring": "Builds a path to a new subdirectory within the base directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. 
Returns: The full path of the new subdirectory (which is not actually created yet). Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_utils.py", + "ast_data": "FunctionDef name:get_timestamped_export_dir arg:export_dir_base arguments arg Assign While Compare Assign Call Call Assign Call Call Call Call If Call Return return:yes Call Call Call Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "call", + "source_code": "def call(self, inputs, state):\n _check_rnn_cell_input_dtypes([inputs, state])\n sigmoid = math_ops.sigmoid\n one = constant_op.constant(1, dtype=dtypes.int32)\n if self._state_is_tuple:\n c, h = state\n else:\n c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one)\n gate_inputs = math_ops.matmul(array_ops.concat([inputs, h], 1), self._kernel)\n gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)\n i, j, f, o = array_ops.split(value=gate_inputs, num_or_size_splits=4, axis=one)\n forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)\n add = math_ops.add\n multiply = math_ops.multiply\n new_c = add(multiply(c, sigmoid(add(f, forget_bias_tensor))), multiply(sigmoid(i), self._activation(j)))\n new_h = multiply(self._activation(new_c), sigmoid(o))\n if self._state_is_tuple:\n new_state = LSTMStateTuple(new_c, new_h)\n else:\n new_state = array_ops.concat([new_c, new_h], 1)\n return (new_h, new_state)", + "docstring": "Long short-term memory cell (LSTM). Args: inputs: tensor with shape . state: An of state tensors, each shaped , if has been set to . Otherwise, a shaped . Returns: A pair containing the new hidden state, and the new state (either a or a concatenated state, depending on ).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py", + "ast_data": "FunctionDef name:call arg:self arg:inputs arg:state arguments arg arg arg Call Assign Assign Call If Assign Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Assign Assign Call Call Call Call Call Call Call Assign Call Call Call If Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "validate_checkpoint_id", + "source_code": "@classmethod\ndef validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:\n return os.path.isfile(checkpoint_id)", + "docstring": "Implementation of the StorageReader method", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\format_utils.py", + "ast_data": "FunctionDef name:validate_checkpoint_id arg:cls arg:checkpoint_id arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "OpDispatcher", + "source_code": "@tf_export('__internal__.dispatch.OpDispatcher', v1=[])\nclass OpDispatcher(object):\n NOT_SUPPORTED = object()\n\n def handle(self, args, kwargs):\n return self.NOT_SUPPORTED\n\n def register(self, op):\n if not hasattr(op, FALLBACK_DISPATCH_ATTR):\n raise AssertionError('Dispatching not enabled for %s' % op)\n getattr(op, FALLBACK_DISPATCH_ATTR).append(self)", + "docstring": "Abstract base class for TensorFlow operator dispatchers. 
Each operation dispatcher acts as an override handler for a single TensorFlow operation, and its results are used when the handler indicates that it can handle the operation's arguments (by returning any value other than ).", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", + "ast_data": "ClassDef name:OpDispatcher Assign Call FunctionDef name:handle arg:self arg:args arg:kwargs arguments arg arg arg Return return:yes FunctionDef name:register arg:self arg:op arguments arg arg If Call Raise Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "evaluate", + "source_code": "def evaluate(self, points):\n points = np.atleast_2d(points)\n dim, num_m = np.array(points).shape\n if dim != self.dim:\n raise ValueError(f'points have dimension {dim}, dataset has dimension {self.dim}')\n result = np.zeros(num_m)\n if num_m >= self.num_dp:\n for i in range(self.num_dp):\n diff = self.dataset[:, i, np.newaxis] - points\n tdiff = np.dot(self.inv_cov, diff)\n energy = np.sum(diff * tdiff, axis=0) / 2.0\n result = result + np.exp(-energy)\n else:\n for i in range(num_m):\n diff = self.dataset - points[:, i, np.newaxis]\n tdiff = np.dot(self.inv_cov, diff)\n energy = np.sum(diff * tdiff, axis=0) / 2.0\n result[i] = np.sum(np.exp(-energy), axis=0)\n result = result / self.norm_factor\n return result", + "docstring": "Evaluate the estimated pdf on a set of points. Parameters ---------- points : (# of dimensions, # of points)-array Alternatively, a (# of dimensions,) vector can be passed in and treated as a single point. Returns ------- (# of points,)-array The values at each point. Raises ------ ValueError : if the dimensionality of the input points is different than the dimensionality of the KDE.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\mlab.py", + "ast_data": "FunctionDef name:evaluate arg:self arg:points arguments arg arg Assign Call Assign Call If Compare Raise Call Assign Call If Compare For Call Assign Assign Call Assign Call Assign Call For Call Assign Assign Call Assign Call Assign Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_identity_broadcaster", + "source_code": "@classmethod\ndef get_identity_broadcaster(cls, nvals, dtype=None):\n return _GatherLayerBroadcaster(math_ops.range(nvals, dtype=dtype))", + "docstring": "Create an identity broadcaster. TODO(martinz): an identity broadcaster can be far more efficient than a generic broadcaster. Add an optimized implementation. Args: nvals: the number of values for the broadcaster. dtype: the dtype of the broadcaster, or None to use the dtype of nvals. 
Returns: an identity broadcaster from [0....nvals-1] to [0...nvals-1]", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:get_identity_broadcaster arg:cls arg:nvals arg:dtype arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "metadata", + "source_code": "def metadata(self) -> ShardedTensorMetadata:\n return self._metadata", + "docstring": "Returns a :class: object corresponding to the metadata for the entire tensor.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py", + "ast_data": "FunctionDef name:metadata arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "to_language", + "source_code": "def to_language(self):\n return self.__to_language", + "docstring": "Return the translation language name.", + "type": "method", + "file_path": "django\\django\\utils\\translation\\trans_real.py", + "ast_data": "FunctionDef name:to_language arg:self arguments arg Return return:yes" + }, + { + "library": "authlib", + "name": "generate_authorization_code", + "source_code": "def generate_authorization_code(self):\n return generate_token(self.AUTHORIZATION_CODE_LENGTH)", + "docstring": "\"The method to generate \"code\" value for authorization code data. Developers may rewrite this method, or customize the code length with:: class MyAuthorizationCodeGrant(AuthorizationCodeGrant): AUTHORIZATION_CODE_LENGTH = 32 # default is 48", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\authorization_code.py", + "ast_data": "FunctionDef name:generate_authorization_code arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "find_sycl_config", + "source_code": "def find_sycl_config():\n basekit_path = _get_basekit_path()\n toolkit_path = _get_toolkit_path()\n if not os.path.exists(basekit_path):\n raise ConfigError('Specified SYCL_TOOLKIT_PATH \"{}\" does not exist'.format(basekit_path))\n result = {}\n result['sycl_basekit_path'] = basekit_path\n result['sycl_toolkit_path'] = toolkit_path\n result.update(_find_sycl_config(basekit_path))\n return result", + "docstring": "Returns a dictionary of SYCL components config info.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_sycl_config.py", + "ast_data": "FunctionDef name:find_sycl_config arguments Assign Call Assign Call If Call Raise Call Call Assign Assign Assign Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "flatnotmasked_edges", + "source_code": "def flatnotmasked_edges(a):\n m = getmask(a)\n if m is nomask or not np.any(m):\n return np.array([0, a.size - 1])\n unmasked = np.flatnonzero(~m)\n if len(unmasked) > 0:\n return unmasked[[0, -1]]\n else:\n return None", + "docstring": "Find the indices of the first and last unmasked values. Expects a 1-D , returns None if all values are masked. Parameters ---------- a : array_like Input 1-D Returns ------- edges : ndarray or None The indices of first and last non-masked value in the array. Returns None if all values are masked. See Also -------- flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges clump_masked, clump_unmasked Notes ----- Only accepts 1-D arrays. 
Examples -------- >>> import numpy as np >>> a = np.ma.arange(10) >>> np.ma.flatnotmasked_edges(a) array([0, 9]) >>> mask = (a 8) | (a == 5) >>> a[mask] = np.ma.masked >>> np.array(a[~a.mask]) array([3, 4, 6, 7, 8]) >>> np.ma.flatnotmasked_edges(a) array([3, 8]) >>> a[:] = np.ma.masked >>> print(np.ma.flatnotmasked_edges(a)) None", + "type": "function", + "file_path": "numpy\\numpy\\ma\\extras.py", + "ast_data": "FunctionDef name:flatnotmasked_edges arg:a arguments arg Assign Call If BoolOp Compare Call Return return:yes Call Assign Call If Compare Call Return return:yes Return return:no" + }, + { + "library": "django", + "name": "is_same_domain", + "source_code": "def is_same_domain(host, pattern):\n if not pattern:\n return False\n pattern = pattern.lower()\n return pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or pattern == host", + "docstring": "Return ``). Anything else is an exact string match.", + "type": "function", + "file_path": "django\\django\\utils\\http.py", + "ast_data": "FunctionDef name:is_same_domain arg:host arg:pattern arguments arg arg If Return return:yes Assign Call Return return:yes BoolOp BoolOp Compare BoolOp Call Compare Compare" + }, + { + "library": "pytorch", + "name": "HandleTrainingState", + "source_code": "class HandleTrainingState(Enum):\n IDLE = auto()\n FORWARD = auto()\n BACKWARD_PRE = auto()\n BACKWARD_POST = auto()\n SUMMON_FULL_PARAMS = auto()", + "docstring": "An enum that indicates the state of a `.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py", + "ast_data": "ClassDef name:HandleTrainingState Assign Call Assign Call Assign Call Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "using_b200", + "source_code": "@functools.lru_cache\ndef using_b200() -> bool:\n if not torch.cuda.is_available():\n return False\n device_properties = torch.cuda.get_device_properties(torch.cuda.current_device())\n return device_properties.major == 10", + "docstring": "Returns true if the device is a NVIDIA B200, otherwise returns false.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\kernel\\mm.py", + "ast_data": "FunctionDef name:using_b200 arguments If Call Return return:yes Assign Call Call Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "_write_report", + "source_code": "def _write_report(self, content):\n line = '%s %s' % (_TRACER_LOG_PREFIX, content)\n if self._report_file:\n self._report_file.write(line)\n else:\n logging.info(line)", + "docstring": "Writes the given content to the report.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py", + "ast_data": "FunctionDef name:_write_report arg:self arg:content arguments arg arg Assign If Call Call" + }, + { + "library": "tensorflow", + "name": "TPUEmbeddingShardedSaveable", + "source_code": "class TPUEmbeddingShardedSaveable(saveable_object.SaveableObject):\n\n def __init__(self, variable: tf_variables.Variable, shard_id: int, num_shards: int, shard_dim: int, name: str):\n self._shard_id = shard_id\n self._variable = variable\n var_offset = [0] * len(variable.shape)\n var_offset[shard_dim] = shard_id * variable.shape[shard_dim]\n fullshape = variable.shape.as_list()\n fullshape[shard_dim] = num_shards * fullshape[shard_dim]\n save_slice_info = tf_variables.Variable.SaveSliceInfo(full_name=name, full_shape=fullshape, var_offset=var_offset, var_shape=variable.shape.as_list())\n spec = saveable_object.SaveSpec(tensor=variable.read_value, 
slice_spec=save_slice_info.spec, name=name, dtype=variable.dtype, device=variable.device)\n super().__init__(variable.read_value, [spec], name)\n\n def restore(self, restored_tensors: List[tensor.Tensor], restored_shapes: List[tensor_shape.TensorShape]) -> Any:\n del restored_shapes\n restored_tensor = restored_tensors[0]\n return values_util.assign_on_device(self._variable.device, self._variable, restored_tensor)", + "docstring": "Defines how to save and restore a shard of TPUEmbedding sharded variable.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", + "ast_data": "ClassDef name:TPUEmbeddingShardedSaveable FunctionDef name:__init__ arg:self arg:variable arg:shard_id arg:num_shards arg:shard_dim arg:name arguments arg arg arg arg arg arg Assign Assign Assign Call Assign Assign Call Assign Assign Call Call Assign Call Call Call FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Assign Return return:yes Call" + }, + { + "library": "django", + "name": "__reduce__", + "source_code": "def __reduce__(self):\n return (getattr, (self.field.model, self.field.name))", + "docstring": "Pickling should return the instance attached by self.field on the model, not a new copy of that descriptor. Use getattr() to retrieve the instance directly from the model.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py", + "ast_data": "FunctionDef name:__reduce__ arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "clone", + "source_code": "def clone(self):\n name, path, args, kwargs = self.deconstruct()\n return self.__class__(*args, **kwargs)", + "docstring": "Uses deconstruct() to clone a new copy of this Field. Will not preserve any class attachments/attribute names.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "FunctionDef name:clone arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "_get_dtype", + "source_code": "def _get_dtype(dtype):\n if np.issubdtype(dtype, np.complexfloating):\n return np.complex128\n else:\n return np.float64", + "docstring": "Return np.complex128 for complex dtypes, np.float64 otherwise.", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_bsplines.py", + "ast_data": "FunctionDef name:_get_dtype arg:dtype arguments arg If Call Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "_get_failure_view", + "source_code": "def _get_failure_view():\n return get_callable(settings.CSRF_FAILURE_VIEW)", + "docstring": "Return the view to be used for CSRF rejections.", + "type": "function", + "file_path": "django\\django\\middleware\\csrf.py", + "ast_data": "FunctionDef name:_get_failure_view arguments Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_rng_state", + "source_code": "def get_rng_state(device: Union[int, str, torch.device]='mps') -> Tensor:\n return _get_default_mps_generator().get_state()", + "docstring": "Returns the random number generator state as a ByteTensor. Args: device (torch.device or int, optional): The device to return the RNG state of. 
Default: ``, the current MPS device).", + "type": "function", + "file_path": "pytorch\\torch\\mps\\__init__.py", + "ast_data": "FunctionDef name:get_rng_state arg:device arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "pop_tape", + "source_code": "def pop_tape(tape):\n pywrap_tfe.TFE_Py_TapeSetRemove(tape._tape)", + "docstring": "Pops the given tape in the stack.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\eager\\tape.py", + "ast_data": "FunctionDef name:pop_tape arg:tape arguments arg Call" + }, + { + "library": "pytorch", + "name": "step", + "source_code": "def step(self, *args, target=None, losses: Optional[list]=None, **kwargs):\n self._stage.clear_runtime_states()\n args_split, kwargs_split = self._split_inputs(args, kwargs)\n if target is not None:\n targets_split = list(torch.tensor_split(target, self._n_microbatches))\n else:\n targets_split = None\n self._step_microbatches(args_split, kwargs_split, targets_split, losses)\n if self._stage.is_last:\n return self._merge_outputs(self._stage.output_chunks)\n else:\n return None", + "docstring": "Run one iteration of the pipeline schedule with *whole-batch* input. Will chunk the input into microbatches automatically, and go through the microbatches according to the schedule implementation. args: positional arguments to the model (as in non-pipeline case). kwargs: keyword arguments to the model (as in non-pipeline case). target: target for the loss function. losses: a list to store the losses for each microbatch.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py", + "ast_data": "FunctionDef name:step arg:self arguments arg arg arg arg arg Call Assign Call If Compare Assign Call Call Assign Call If Return return:yes Call Return return:no" + }, + { + "library": "tensorflow", + "name": "_gen", + "source_code": "def _gen(data):\n index_array = np.arange(num_samples)\n for _ in range(epochs):\n if shuffle:\n np.random.shuffle(index_array)\n batches = generic_utils.make_batches(num_samples, batch_size)\n for batch_start, batch_end in batches:\n batch_ids = index_array[batch_start:batch_end]\n flat_batch_data = training_utils.slice_arrays(nest.flatten(data), batch_ids, contiguous=not shuffle)\n yield nest.pack_sequence_as(data, flat_batch_data)", + "docstring": "Makes a generator out of a structure of NumPy/EagerTensors.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_generator_v1.py", + "ast_data": "FunctionDef name:_gen arg:data arguments arg Assign Call For Call If Call Assign Call For Assign Assign Call Call Call" + }, + { + "library": "kornia", + "name": "scale", + "source_code": "def scale(self, scale_factor: Tensor) -> 'PinholeCamera':\n intrinsics: Tensor = self.intrinsics.clone()\n intrinsics[..., 0, 0] *= scale_factor\n intrinsics[..., 1, 1] *= scale_factor\n intrinsics[..., 0, 2] *= scale_factor\n intrinsics[..., 1, 2] *= scale_factor\n height: Tensor = scale_factor * self.height.clone()\n width: Tensor = scale_factor * self.width.clone()\n return PinholeCamera(intrinsics, self.extrinsics, height, width)", + "docstring": "Scale the pinhole model. Args: scale_factor: a tensor with the scale factor. It has to be broadcastable with class members. The expected shape is :math: or :math:. 
Returns: the camera model with scaled parameters.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", + "ast_data": "FunctionDef name:scale arg:self arg:scale_factor arguments arg arg Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "FullStateDictConfig", + "source_code": "@dataclass\nclass FullStateDictConfig(StateDictConfig):\n rank0_only: bool = False", + "docstring": "`state_dict_typestatesync_module_states`)", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\fsdp\\api.py", + "ast_data": "ClassDef name:FullStateDictConfig" + }, + { + "library": "pytorch", + "name": "_get_symmetric_qnnpack_qat_qconfig_mapping", + "source_code": "def _get_symmetric_qnnpack_qat_qconfig_mapping() -> QConfigMapping:\n default_qconfig = default_symmetric_qnnpack_qat_qconfig\n return _get_default_qconfig_mapping_with_default_qconfig(True, 'qnnpack', default_qconfig)", + "docstring": "Return a QConfigMapping that uses as the default QConfig.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py", + "ast_data": "FunctionDef name:_get_symmetric_qnnpack_qat_qconfig_mapping arguments Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "CompiledFxGraphLoadable", + "source_code": "@dataclass\nclass CompiledFxGraphLoadable(InductorOutput[CompiledFxGraph]):\n result: CompiledFxGraph\n\n def pre_save(self) -> None:\n disk_compiled_graph = copy(self.result)\n disk_compiled_graph.prepare_for_serialization()\n self.result = disk_compiled_graph\n return\n\n def load(self, example_inputs) -> CompiledFxGraph:\n self.example_inputs = example_inputs\n return self.result\n\n def post_compile(self, result: CompiledFxGraph, fx_config: _CompileFxKwargs) -> CompiledFxGraph:\n constants = CompiledFxGraphConstants()\n graph, cache_info = FxGraphCache.cache_hit_post_compile(result, {}, constants)\n if graph is None:\n raise BypassAOTAutogradCache('Failed to reload cache entry from disk')\n torch._logging.trace_structured('artifact', metadata_fn=lambda: {'name': 'fx_graph_bundled_cache_hit', 'encoding': 'json'}, payload_fn=lambda: json.dumps(cache_info))\n counters['inductor']['fxgraph_cache_hit'] += 1\n graph.post_compile(self.example_inputs, constants, fx_config)\n return graph", + "docstring": "A full compiled fx graph that doesn't need to lookup the FxGraphCache to run", + "type": "class", + "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py", + "ast_data": "ClassDef name:CompiledFxGraphLoadable FunctionDef name:pre_save arg:self arguments arg Assign Call Call Assign Return return:no FunctionDef name:load arg:self arg:example_inputs arguments arg arg Assign Return return:yes FunctionDef name:post_compile arg:self arg:result arg:fx_config arguments arg arg arg Assign Call Assign Call If Compare Raise Call Call arguments arguments Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_get_font", + "source_code": "def _get_font(self, prop):\n filenames = _fontManager._find_fonts_by_props(prop)\n font = get_font(filenames)\n font.set_size(self.FONT_SCALE, self.DPI)\n return font", + "docstring": "Find the matching font properties *prop*, with its size set.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\textpath.py", + "ast_data": "FunctionDef name:_get_font arg:self arg:prop arguments arg arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "axisinfo", + "source_code": "@staticmethod\ndef 
axisinfo(unit: tzinfo | None, axis) -> munits.AxisInfo:\n tz = unit\n majloc = PandasAutoDateLocator(tz=tz)\n majfmt = PandasAutoDateFormatter(majloc, tz=tz)\n datemin = pydt.date(2000, 1, 1)\n datemax = pydt.date(2010, 1, 1)\n return munits.AxisInfo(majloc=majloc, majfmt=majfmt, label='', default_limits=(datemin, datemax))", + "docstring": "Return the :class: for *unit*. *unit* is a tzinfo instance or None. The *axis* argument is required but not used.", + "type": "method", + "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py", + "ast_data": "FunctionDef name:axisinfo arg:unit arg:axis arguments arg arg Assign Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "FFMpegWriter", + "source_code": "@writers.register('ffmpeg')\nclass FFMpegWriter(FFMpegBase, MovieWriter):\n\n def _args(self):\n args = [self.bin_path(), '-f', 'rawvideo', '-vcodec', 'rawvideo', '-s', '%dx%d' % self.frame_size, '-pix_fmt', self.frame_format, '-framerate', str(self.fps)]\n if _log.getEffectiveLevel() > logging.DEBUG:\n args += ['-loglevel', 'error']\n args += ['-i', 'pipe:'] + self.output_args\n return args", + "docstring": "Pipe-based ffmpeg writer. Frames are streamed directly to ffmpeg via a pipe and written in a single pass. This effectively works as a slideshow input to ffmpeg with the fps passed as `their notes on frame rates`_ for further details. .. _their notes on frame rates:", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\animation.py", + "ast_data": "ClassDef name:FFMpegWriter FunctionDef name:_args arg:self arguments arg Assign Call Call If Compare Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "dynamic_list_append", + "source_code": "def dynamic_list_append(target, element):\n if isinstance(target, tensor_array_ops.TensorArray):\n return target.write(target.size(), element)\n if isinstance(target, tensor.Tensor):\n return list_ops.tensor_list_push_back(target, element)\n target.append(element)\n return target", + "docstring": "Converts a list append call inline.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\utils\\tensor_list.py", + "ast_data": "FunctionDef name:dynamic_list_append arg:target arg:element arguments arg arg If Call Return return:yes Call Call If Call Return return:yes Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "to_iter_dict", + "source_code": "def to_iter_dict(self) -> Generator[tuple[str, Self]]:\n key = lambda block: str(block.dtype)\n for dtype, blocks in itertools.groupby(sorted(self.blocks, key=key), key=key):\n yield (dtype, self._combine(list(blocks)))", + "docstring": "Yield a tuple of (str(dtype), BlockManager) Returns ------- values : a tuple of (str(dtype), BlockManager)", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:to_iter_dict arg:self arguments arg Assign arguments arg Call For Call Call Call Call" + }, + { + "library": "django", + "name": "fromstr", + "source_code": "def fromstr(string, **kwargs):\n return GEOSGeometry(string, **kwargs)", + "docstring": "Given a string value, return a GEOSGeometry object.", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\geos\\factory.py", + "ast_data": "FunctionDef name:fromstr arg:string arguments arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_convert_obj", + "source_code": "def _convert_obj(self, obj: NDFrameT) -> NDFrameT:\n 
return obj._consolidate()", + "docstring": "Provide any conversions for the object in order to correctly handle. Parameters ---------- obj : Series or DataFrame Returns ------- Series or DataFrame", + "type": "method", + "file_path": "pandas\\pandas\\core\\resample.py", + "ast_data": "FunctionDef name:_convert_obj arg:self arg:obj arguments arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "shape", + "source_code": "@property\ndef shape(self) -> tuple[int, ...]:\n return self.data.shape", + "docstring": "Return the image shape.", + "type": "method", + "file_path": "kornia\\kornia\\image\\image.py", + "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, tpu_hardware_feature_proto):\n self.tpu_hardware_feature_proto = tpu_hardware_feature_proto", + "docstring": "Store TPU hardware feature info. Args: tpu_hardware_feature_proto: protobuf which describe the tpu hardware feature.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_hardware_feature.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:tpu_hardware_feature_proto arguments arg arg Assign" + }, + { + "library": "matplotlib", + "name": "calculate_plane_coefficients", + "source_code": "def calculate_plane_coefficients(self, z):\n return self.get_cpp_triangulation().calculate_plane_coefficients(z)", + "docstring": "Calculate plane equation coefficients for all unmasked triangles from the point (x, y) coordinates and specified z-array of shape (npoints). The returned array has shape (npoints, 3) and allows z-value at (x, y) position in triangle tri to be calculated using ``.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triangulation.py", + "ast_data": "FunctionDef name:calculate_plane_coefficients arg:self arg:z arguments arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "var", + "source_code": "def var(self, alpha, n):\n a, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n)\n n, Sa = (n[..., np.newaxis], Sa[..., np.newaxis])\n return n * a / Sa * (1 - a / Sa) * (n + Sa) / (1 + Sa)", + "docstring": "The variance of the Dirichlet multinomial distribution. Parameters ---------- %(_dirichlet_mn_doc_default_callparams)s Returns ------- out: array_like The variances of the components of the distribution. This is the diagonal of the covariance matrix of the distribution.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:var arg:self arg:alpha arg:n arguments arg arg arg Assign Call Assign Return return:yes" + }, + { + "library": "sphinx", + "name": "desc_annotation", + "source_code": "class desc_annotation(nodes.Part, nodes.Inline, nodes.FixedTextElement):\n pass", + "docstring": "Node for signature annotations (not Python 3-style annotations).", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:desc_annotation" + }, + { + "library": "pytorch", + "name": "is_memory_copy", + "source_code": "def is_memory_copy(self) -> bool:\n return len(self.memory_usage[MemoryUsageType.LOAD]) == 1 and len(self.memory_usage[MemoryUsageType.STORE]) == 1 and (len(self.submodules) == 1) and self.root_block.contains_only_ops(('load', 'store'))", + "docstring": "True of this contains only a single loads and store. 
Note, this could involve a layout change.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\loop_body.py", + "ast_data": "FunctionDef name:is_memory_copy arg:self arguments arg Return return:yes BoolOp Compare Call Compare Call Compare Call Call" + }, + { + "library": "pytorch", + "name": "forward", + "source_code": "def forward(self, *args: Any, **kwargs: Any) -> Any:\n handle = self._handle\n with torch.autograd.profiler.record_function('FullyShardedDataParallel.forward'):\n args, kwargs = _root_pre_forward(self, self, args, kwargs)\n unused = None\n args, kwargs = _pre_forward(self, handle, _pre_forward_unshard, self._fsdp_wrapped_module, args, kwargs)\n if handle:\n _p_assert(handle.flat_param.device == self.compute_device, f'Expected `FlatParameter` to be on the compute device {self.compute_device} but got {handle.flat_param.device}')\n output = self._fsdp_wrapped_module(*args, **kwargs)\n return _post_forward(self, handle, _post_forward_reshard, self, unused, output)", + "docstring": "Run the forward pass for the wrapped module, inserting FSDP-specific pre- and post-forward sharding logic.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py", + "ast_data": "FunctionDef name:forward arg:self arguments arg arg arg Assign With Call Assign Call Assign Assign Call If Call Compare Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "allow_mutation_on_saved_tensors", + "source_code": "@contextlib.contextmanager\ndef allow_mutation_on_saved_tensors() -> Generator[_AllowMutationOnSavedContext, None, None]:\n global _allow_mutation_on_saved_tensors_enabled\n ctx = _AllowMutationOnSavedContext()\n with _swap_with_cloned(ctx), _CloneArgBeforeMutateMode(ctx):\n try:\n if _allow_mutation_on_saved_tensors_enabled:\n raise RuntimeError('allow_mutation_on_saved_tensors contexts cannot be nested')\n _allow_mutation_on_saved_tensors_enabled = True\n yield ctx\n finally:\n ctx.clear()\n _allow_mutation_on_saved_tensors_enabled = False", + "docstring": "Context manager under which mutating tensors saved for backward is allowed. Under this context manager, tensors saved for backward are cloned on mutation, so the original version can still be used during backward. Normally, mutating a tensor saved for backward will result in an error raised when it's used during backward. To ensure the correct behavior, both the forward and backward should be run under the same context manager. Returns: An _AllowMutationOnSavedContext object storing the state managed by this context manager. This object can be useful for debugging purposes. The state managed by the context manager is automatically cleared upon exiting. Example:: >>> import torch >>> with torch.autograd.graph.allow_mutation_on_saved_tensors(): ... # forward ... a = torch.ones(2, 3, requires_grad=True) ... b = a.clone() ... out = (b**2).sum() ... b.sin_() ... # backward ... out.sum().backward() ... 
tensor([[0.8415, 0.8415, 0.8415], [0.8415, 0.8415, 0.8415]], grad_fn=)", + "type": "function", + "file_path": "pytorch\\torch\\autograd\\graph.py", + "ast_data": "FunctionDef name:allow_mutation_on_saved_tensors arguments Assign Call With Call Call Try If Raise Call Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "uniform_full_int", + "source_code": "def uniform_full_int(self, shape, dtype=dtypes.uint64, name=None):\n dtype = dtypes.as_dtype(dtype)\n with ops.name_scope(name, 'stateful_uniform_full_int', [shape]) as name:\n shape = _shape_tensor(shape)\n return self._uniform_full_int(shape=shape, dtype=dtype, name=name)", + "docstring": "Uniform distribution on an integer type's entire range. This method is the same as setting and to in the method. Args: shape: the shape of the output. dtype: (optional) the integer type, default to uint64. name: (optional) the name of the node. Returns: A tensor of random numbers of the required shape.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", + "ast_data": "FunctionDef name:uniform_full_int arg:self arg:shape arg:dtype arg:name arguments arg arg arg arg Assign Call With Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_InferGradientReductionAxes", + "source_code": "def _InferGradientReductionAxes(x_shape, y_shape):\n x_rank = x_shape.rank\n y_rank = y_shape.rank\n if x_rank is None or y_rank is None:\n return (None, None)\n x_shape = x_shape.as_list()\n y_shape = y_shape.as_list()\n b_rank = max(x_rank, y_rank)\n x_axes = []\n y_axes = []\n for axis in range(b_rank):\n x_dim = 1 if axis < b_rank - x_rank else x_shape[axis - (b_rank - x_rank)]\n y_dim = 1 if axis < b_rank - y_rank else y_shape[axis - (b_rank - y_rank)]\n if x_dim == 1 and y_dim != 1:\n x_axes.append(axis)\n elif y_dim == 1 and x_dim != 1:\n y_axes.append(axis)\n elif x_dim is None or y_dim is None:\n return (None, None)\n return (x_axes, y_axes)", + "docstring": "Infers the sets of axes that might have been broadcasted.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_InferGradientReductionAxes arg:x_shape arg:y_shape arguments arg arg Assign Assign If BoolOp Compare Compare Return return:no Assign Call Assign Call Assign Call Assign Assign For Call Assign Compare Assign Compare If BoolOp Compare Compare Call If BoolOp Compare Compare Call If BoolOp Compare Compare Return return:no Return return:yes" + }, + { + "library": "tensorflow", + "name": "cast_if_floating_dtype_and_mismatch", + "source_code": "def cast_if_floating_dtype_and_mismatch(targets, outputs):\n if tensor_util.is_tf_type(targets):\n return cast_single_tensor(targets, dtype=outputs[0].dtype)\n new_targets = []\n for target, out in zip(targets, outputs):\n if isinstance(target, np.ndarray):\n target = tensor_conversion.convert_to_tensor_v2_with_dispatch(target)\n if target.dtype != out.dtype:\n new_targets.append(cast_single_tensor(target, dtype=out.dtype))\n else:\n new_targets.append(target)\n return new_targets", + "docstring": "Returns target data tensors using correct datatype. Checks that each target and output pair are the same datatype. If not, casts the target to the output's datatype. Args: targets: tensor or list of targets. outputs: tensor or list of outputs. 
Returns: Targets in appropriate datatype.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "FunctionDef name:cast_if_floating_dtype_and_mismatch arg:targets arg:outputs arguments arg arg If Call Return return:yes Call Assign For Call If Call Assign Call If Compare Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "evaluate_expr", + "source_code": "def evaluate_expr(self, orig_expr: sympy.Basic, hint: Optional[Union[int, bool, float]]=None, fx_node: Optional[torch.fx.Node]=None, size_oblivious: bool=False, fallback_value: Optional[bool]=None, *, forcing_spec: bool=False) -> sympy.Basic:\n suppress_guards_tls = ShapeEnv._suppress_guards_tls()\n return self._inner_evaluate_expr(orig_expr, hint, fx_node, size_oblivious, forcing_spec, suppress_guards_tls, fallback_value)", + "docstring": "Given an expression, evaluates it, adding guards if necessary When fallback_value is not None the function return fallback_value instead of failing with data dependent error.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:evaluate_expr arg:self arg:orig_expr arg:hint arg:fx_node arg:size_oblivious arg:fallback_value arguments arg arg arg arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "pygame", + "name": "draw", + "source_code": "def draw(self, surface, bgsurf=None, special_flags=0):\n sprites = self.sprites()\n if hasattr(surface, 'blits'):\n self.spritedict.update(zip(sprites, surface.blits(((spr.image, spr.rect, None, special_flags) for spr in sprites))))\n else:\n for spr in sprites:\n self.spritedict[spr] = surface.blit(spr.image, spr.rect, None, special_flags)\n self.lostsprites = []\n dirty = self.lostsprites\n return dirty", + "docstring": "draw all sprites onto the surface Group.draw(surface, special_flags=0): return Rect_list Draws all of the member sprites onto the given surface.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:draw arg:self arg:surface arg:bgsurf arg:special_flags arguments arg arg arg arg Assign Call If Call Call Call Call For Assign Call Assign Assign Return return:yes" + }, + { + "library": "kornia", + "name": "_check_Q_matrix", + "source_code": "def _check_Q_matrix(Q_matrix: Tensor) -> None:\n if not isinstance(Q_matrix, Tensor):\n raise StereoException(f\"Expected 'Q_matrix' to be an instance of Tensor but got {type(Q_matrix)}.\")\n if not len(Q_matrix.shape) == 3:\n raise StereoException(f\"Expected 'Q_matrix' to have 3 dimensions. Got {Q_matrix.shape}\")\n if not Q_matrix.shape[1:] == (4, 4):\n raise StereoException(f\"Expected last two dimensions of 'Q_matrix' to be of shape (4, 4). Got {Q_matrix.shape}\")\n if Q_matrix.dtype not in (torch.float16, torch.float32, torch.float64):\n raise StereoException(f\"Expected 'Q_matrix' to be of type torch.float16, torch.float32 or torch.float64. Got {Q_matrix.dtype}\")", + "docstring": "Ensure Q matrix is of correct form. 
Args: Q_matrix: The Q matrix for reprojecting disparity to a point cloud of shape :math:", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py", + "ast_data": "FunctionDef name:_check_Q_matrix arg:Q_matrix arguments arg If Call Raise Call Call If Compare Call Raise Call If Compare Raise Call If Compare Raise Call" + }, + { + "library": "tensorflow", + "name": "checkpoint", + "source_code": "@property\ndef checkpoint(self):\n return self._checkpoint", + "docstring": "Returns the object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py", + "ast_data": "FunctionDef name:checkpoint arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_intersection", + "source_code": "def get_intersection(cx1, cy1, cos_t1, sin_t1, cx2, cy2, cos_t2, sin_t2):\n line1_rhs = sin_t1 * cx1 - cos_t1 * cy1\n line2_rhs = sin_t2 * cx2 - cos_t2 * cy2\n a, b = (sin_t1, -cos_t1)\n c, d = (sin_t2, -cos_t2)\n ad_bc = a * d - b * c\n if abs(ad_bc) < 1e-12:\n raise ValueError('Given lines do not intersect. Please verify that the angles are not equal or differ by 180 degrees.')\n a_, b_ = (d, -b)\n c_, d_ = (-c, a)\n a_, b_, c_, d_ = (k / ad_bc for k in [a_, b_, c_, d_])\n x = a_ * line1_rhs + b_ * line2_rhs\n y = c_ * line1_rhs + d_ * line2_rhs\n return (x, y)", + "docstring": "Return the intersection between the line through (*cx1*, *cy1*) at angle *t1* and the line through (*cx2*, *cy2*) at angle *t2*.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\bezier.py", + "ast_data": "FunctionDef name:get_intersection arg:cx1 arg:cy1 arg:cos_t1 arg:sin_t1 arg:cx2 arg:cy2 arg:cos_t2 arg:sin_t2 arguments arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign If Compare Call Raise Call Assign Assign Assign Assign Assign Return return:yes" + }, + { + "library": "numpy", + "name": "header_data_from_array_1_0", + "source_code": "@set_module('numpy.lib.format')\ndef header_data_from_array_1_0(array):\n d = {'shape': array.shape}\n if array.flags.c_contiguous:\n d['fortran_order'] = False\n elif array.flags.f_contiguous:\n d['fortran_order'] = True\n else:\n d['fortran_order'] = False\n d['descr'] = dtype_to_descr(array.dtype)\n return d", + "docstring": "Get the dictionary of header metadata from a numpy.ndarray. Parameters ---------- array : numpy.ndarray Returns ------- d : dict This has the appropriate entries for writing its string representation to the header of the file.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_format_impl.py", + "ast_data": "FunctionDef name:header_data_from_array_1_0 arg:array arguments arg Assign If Assign If Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "need_numel_args", + "source_code": "def need_numel_args(self):\n return True", + "docstring": "Indicate whether we need provide numel as arguments for the generated kernel calls in the benchmark. 
Should be true for pointwise/reduction kernels but false for triton matmul kernels.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py", + "ast_data": "FunctionDef name:need_numel_args arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_apply_device_functions", + "source_code": "def _apply_device_functions(self, op) -> None:\n prior_device_string = None\n for device_spec in self._device_function_stack.peek_objs():\n if device_spec.is_null_merge:\n continue\n if device_spec.function is None:\n break\n device_string = device_spec.string_merge(op)\n if device_string is not prior_device_string:\n op._set_device_from_string(device_string)\n prior_device_string = device_string\n op._device_code_locations = self._snapshot_device_function_stack_metadata()", + "docstring": "Applies the current device function stack to the given operation.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_apply_device_functions arg:self arg:op arguments arg arg Assign For Call If If Compare Assign Call If Compare Call Assign Assign Call" + }, + { + "library": "pytorch", + "name": "load_state_dict", + "source_code": "def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)", + "docstring": "Loads the schedulers state. Note: Remember to restore the state of the data_sparsifier before the scheduler. Args: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_scheduler\\base_data_scheduler.py", + "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Call" + }, + { + "library": "authlib", + "name": "ClientMixin", + "source_code": "class ClientMixin:\n\n def get_client_id(self):\n raise NotImplementedError()\n\n def get_default_redirect_uri(self):\n raise NotImplementedError()\n\n def get_allowed_scope(self, scope):\n raise NotImplementedError()\n\n def check_redirect_uri(self, redirect_uri):\n raise NotImplementedError()\n\n def check_client_secret(self, client_secret):\n raise NotImplementedError()\n\n def check_endpoint_auth_method(self, method, endpoint):\n raise NotImplementedError()\n\n def check_response_type(self, response_type):\n raise NotImplementedError()\n\n def check_grant_type(self, grant_type):\n raise NotImplementedError()", + "docstring": "Implementation of OAuth 2 Client described in _ with some methods to help validation. A client has at least these information: * client_id: A string represents client identifier. * client_secret: A string represents client password. * token_endpoint_auth_method: A way to authenticate client at token endpoint. .. 
_:", + "type": "class", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py", + "ast_data": "ClassDef name:ClientMixin FunctionDef name:get_client_id arg:self arguments arg Raise Call FunctionDef name:get_default_redirect_uri arg:self arguments arg Raise Call FunctionDef name:get_allowed_scope arg:self arg:scope arguments arg arg Raise Call FunctionDef name:check_redirect_uri arg:self arg:redirect_uri arguments arg arg Raise Call FunctionDef name:check_client_secret arg:self arg:client_secret arguments arg arg Raise Call FunctionDef name:check_endpoint_auth_method arg:self arg:method arg:endpoint arguments arg arg arg Raise Call FunctionDef name:check_response_type arg:self arg:response_type arguments arg arg Raise Call FunctionDef name:check_grant_type arg:self arg:grant_type arguments arg arg Raise Call" + }, + { + "library": "pygame", + "name": "pixels_alpha", + "source_code": "def pixels_alpha(surface):\n return numpy.array(surface.get_view('A'), copy=False)", + "docstring": "pygame.surfarray.pixels_alpha(Surface): return array reference pixel alphas into a 2d array Create a new 2D array that directly references the alpha values (degree of transparency) in a Surface. Any changes to the array will affect the pixels in the Surface. This is a fast operation since no data is copied. This can only work on 32-bit Surfaces with a per-pixel alpha value. The Surface this array references will remain locked for the lifetime of the array.", + "type": "function", + "file_path": "pygame\\src_py\\surfarray.py", + "ast_data": "FunctionDef name:pixels_alpha arg:surface arguments arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, y):\n check_is_fitted(self)\n xp, _ = get_namespace(y)\n y = column_or_1d(y, dtype=self.classes_.dtype, warn=True)\n if _num_samples(y) == 0:\n return xp.asarray([])\n return _encode(y, uniques=self.classes_)", + "docstring": "Transform labels to normalized encoding. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- y : array-like of shape (n_samples,) Labels as normalized encodings.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py", + "ast_data": "FunctionDef name:transform arg:self arg:y arguments arg arg Call Assign Call Assign Call If Compare Call Return return:yes Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, numpoints=None, yoffsets=None, **kwargs):\n super().__init__(numpoints=numpoints, **kwargs)\n self._yoffsets = yoffsets", + "docstring": "Parameters ---------- numpoints : int Number of points to show in legend entry. yoffsets : array of floats Length *numpoints* list of y offsets for each point in legend entry. 
**kwargs Keyword arguments forwarded to .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:numpoints arg:yoffsets arguments arg arg arg arg Call Call Assign" + }, + { + "library": "pytorch", + "name": "register_ddp", + "source_code": "@abstractmethod\ndef register_ddp(self, ddp: DistributedDataParallel) -> None:\n raise NotImplementedError(f'{self.__class__.__name__} does not support overlapped DDP.')", + "docstring": "Registers the overlapped optimizer with DDP.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\algorithms\\_optimizer_overlap\\optimizer_overlap.py", + "ast_data": "FunctionDef name:register_ddp arg:self arg:ddp arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "_toposort", + "source_code": "def _toposort(edges):\n incoming_edges = reverse_dict(edges)\n incoming_edges = OrderedDict(((k, set(val)) for k, val in incoming_edges.items()))\n S = OrderedDict.fromkeys((v for v in edges if v not in incoming_edges))\n L = []\n while S:\n n, _ = S.popitem()\n L.append(n)\n for m in edges.get(n, ()):\n assert n in incoming_edges[m]\n incoming_edges[m].remove(n)\n if not incoming_edges[m]:\n S[m] = None\n if any((incoming_edges.get(v, None) for v in edges)):\n raise ValueError('Input has cycles')\n return L", + "docstring": "Topological sort algorithm by Kahn [1] - O(nodes + vertices) inputs: edges - a dict of the form {a: {b, c}} where b and c depend on a outputs: L - an ordered list of nodes that satisfy the dependencies of edges >>> _toposort({1: (2, 3), 2: (3,)}) [1, 2, 3] >>> # Closely follows the wikipedia page [2] >>> # [1] Kahn, Arthur B. (1962), \"Topological sorting of large networks\", >>> # Communications of the ACM >>> # [2]", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\utils.py", + "ast_data": "FunctionDef name:_toposort arg:edges arguments arg Assign Call Assign Call Call Call Assign Call Compare Assign While Assign Call Call For Call Compare Call If Assign If Call Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_scope", + "source_code": "def _scope(self, strategy):\n return _DefaultDistributionContext(strategy)", + "docstring": "Context manager setting a variable creator and as current.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:_scope arg:self arg:strategy arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "unsafe_scalar_trace", + "source_code": "@staticmethod\ndef unsafe_scalar_trace(op):\n if op.type in ('LoopCond', 'Enter', 'Merge', 'Const', 'Switch', 'Less', 'ReadVariableOp'):\n return True\n if op.type in ('VarHandleOp', 'IteratorToStringHandle', 'IteratorGetNext', 'OneShotIterator', 'IteratorV2', 'MakeIterator', 'BatchDatasetV2', 'MapDataset', 'FixedLengthRecordDataset', 'TakeDataset', 'ZipDataset', 'Placeholder', 'PlaceholderWithDefault', 'StridedSlice'):\n return True\n return False", + "docstring": "Return true if scalar output tensor from Op is not safe to be traced.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:unsafe_scalar_trace arg:op arguments arg If Compare Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_sort_zpos", + "source_code": "def set_sort_zpos(self, 
val):\n self._sort_zpos = val\n self.stale = True", + "docstring": "Set the position to use for z-sorting.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:set_sort_zpos arg:self arg:val arguments arg arg Assign Assign" + }, + { + "library": "pytorch", + "name": "IterationRanges", + "source_code": "@dataclasses.dataclass\nclass IterationRanges:\n\n def __init__(self, name: str, var_list: list[sympy.Symbol], var_ranges: dict[sympy.Symbol, sympy.Expr], numel: sympy.Expr, prefix: str, *, kernel: SIMDKernel, divisor=sympy.S.One, length=sympy.S.One, root: IterationRangesRoot) -> None:\n super().__init__()\n self.name = name\n self.var_list = var_list\n self.var_ranges = var_ranges\n self.numel = numel\n self.prefix = prefix\n self.divisor = divisor\n self.length = length\n self.kernel = kernel\n self.root = root\n\n @property\n @cache_on_self\n @no_type_check\n def is_reduction(self) -> bool:\n return prefix_is_reduction(self.prefix)\n\n def symbol(self) -> sympy.Symbol:\n return sympy_index_symbol(self.name)\n\n @property\n @cache_on_self\n @no_type_check\n def symt(self) -> SymT:\n prefix_to_symt = {prefix: symt for symt, prefix in prefix_str.items()}\n return prefix_to_symt[self.prefix]", + "docstring": "Each range tree represents multiple sets of iteration indexing in a single tiled dimension in the output kernel. If you have two loops ranges one (4, 3, 2) and another (4, 6), then the range tree will be: 4 (i0) 3 (i1) 6 (i3) 2 (i2) Where i0 is shared between both loops, but then the split into different indexing vars. All loop ranges must iterate over the same number of elements.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py", + "ast_data": "ClassDef name:IterationRanges FunctionDef name:__init__ arg:self arg:name arg:var_list arg:var_ranges arg:numel arg:prefix arguments arg arg arg arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign Assign Assign Assign FunctionDef name:is_reduction arg:self arguments arg Return return:yes Call FunctionDef name:symbol arg:self arguments arg Return return:yes Call FunctionDef name:symt arg:self arguments arg Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_find_nonzero_rows", + "source_code": "def _find_nonzero_rows(A, tol):\n return np.any(np.abs(A) > tol, axis=1)", + "docstring": "Returns logical array indicating the locations of rows with at least one nonzero element.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_linprog_rs.py", + "ast_data": "FunctionDef name:_find_nonzero_rows arg:A arg:tol arguments arg arg Return return:yes Call Compare Call" + }, + { + "library": "pytorch", + "name": "_ensure_function_events", + "source_code": "def _ensure_function_events(self):\n if self._function_events is not None:\n return\n self._needs_processing = False\n t0 = perf_counter_ns()\n parsed_results = []\n if self.kineto_results:\n parsed_results = self._parse_kineto_results(self.kineto_results)\n t1 = perf_counter_ns()\n self._stats.parse_kineto_call_duration_us = int((t1 - t0) / 1000)\n self._function_events = EventList(parsed_results, use_device=self.use_device, profile_memory=self.profile_memory, with_flops=self.with_flops)\n t0 = perf_counter_ns()\n self._function_events._build_tree()\n t1 = perf_counter_ns()\n self._stats.function_events_build_tree_call_duration_us = int((t1 - t0) / 1000)\n self._stats.number_of_events = len(self._function_events)\n if self._old_function_events and 
self.acc_events:\n for evt in self._old_function_events:\n self._function_events.append(evt)\n self._old_function_events = None\n if self._function_events is None:\n raise RuntimeError(\"Profiler didn't finish running\")", + "docstring": "Process function events lazily if required", + "type": "method", + "file_path": "pytorch\\torch\\autograd\\profiler.py", + "ast_data": "FunctionDef name:_ensure_function_events arg:self arguments arg If Compare Return return:no Assign Assign Call Assign If Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call If BoolOp For Call Assign If Compare Raise Call" + }, + { + "library": "pandas", + "name": "describe_categorical_1d", + "source_code": "def describe_categorical_1d(data: Series, percentiles_ignored: Sequence[float]) -> Series:\n names = ['count', 'unique', 'top', 'freq']\n objcounts = data.value_counts()\n count_unique = len(objcounts[objcounts != 0])\n if count_unique > 0:\n top, freq = (objcounts.index[0], objcounts.iloc[0])\n dtype = None\n else:\n top, freq = (np.nan, np.nan)\n dtype = 'object'\n result = [data.count(), count_unique, top, freq]\n from pandas import Series\n return Series(result, index=names, name=data.name, dtype=dtype)", + "docstring": "Describe series containing categorical data. Parameters ---------- data : Series Series to be described. percentiles_ignored : list-like of numbers Ignored, but in place to unify interface.", + "type": "function", + "file_path": "pandas\\pandas\\core\\methods\\describe.py", + "ast_data": "FunctionDef name:describe_categorical_1d arg:data arg:percentiles_ignored arguments arg arg Assign Assign Call Assign Call Compare If Compare Assign Assign Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_tmp_dir_for_key", + "source_code": "@classmethod\ndef _get_tmp_dir_for_key(cls: type[AOTAutogradCache], key) -> str:\n return os.path.join(cls._get_tmp_dir(), key)", + "docstring": "Get the toplevel temporary directory for storing compiled graphs.", + "type": "method", + "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py", + "ast_data": "FunctionDef name:_get_tmp_dir_for_key arg:cls arg:key arguments arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "DejaVuSansFonts", + "source_code": "class DejaVuSansFonts(DejaVuFonts):\n _fontmap = {'rm': 'DejaVu Sans', 'it': 'DejaVu Sans:italic', 'bf': 'DejaVu Sans:weight=bold', 'bfit': 'DejaVu Sans:italic:bold', 'sf': 'DejaVu Sans', 'tt': 'DejaVu Sans Mono', 'ex': 'DejaVu Sans Display', 0: 'DejaVu Sans'}", + "docstring": "A font handling class for the DejaVu Sans fonts If a glyph is not found it will fallback to Stix Sans", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py", + "ast_data": "ClassDef name:DejaVuSansFonts Assign" + }, + { + "library": "tensorflow", + "name": "_tf_tensor_list_stack", + "source_code": "def _tf_tensor_list_stack(list_, opts):\n if opts.element_dtype is None:\n raise ValueError('cannot stack a list without knowing its element type; use set_element_type to annotate it')\n return list_ops.tensor_list_stack(list_, element_dtype=opts.element_dtype)", + "docstring": "Overload of list_stack that stages a Tensor list write.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py", + "ast_data": "FunctionDef name:_tf_tensor_list_stack arg:list_ arg:opts arguments arg arg If Compare Raise Call Return return:yes Call" + }, 
+ { + "library": "seaborn", + "name": "crayon_palette", + "source_code": "def crayon_palette(colors):\n palette = [crayons[name] for name in colors]\n return color_palette(palette, len(palette))", + "docstring": "Make a palette with color names from Crayola crayons. Colors are taken from here: This is just a simple wrapper around the dictionary. Parameters ---------- colors : list of strings List of keys in the dictionary. Returns ------- palette A list of colors as RGB tuples. See Also -------- xkcd_palette : Make a palette with named colors from the XKCD color survey.", + "type": "function", + "file_path": "seaborn\\seaborn\\palettes.py", + "ast_data": "FunctionDef name:crayon_palette arg:colors arguments arg Assign Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "transform", + "source_code": "def transform(self, X):\n check_is_fitted(self)\n if not self._precomputed:\n X = self._validate_input(X, in_fit=False)\n elif not (hasattr(X, 'dtype') and X.dtype.kind == 'b'):\n raise ValueError('precomputed is True but the input data is not a mask')\n imputer_mask, features = self._get_missing_features_info(X)\n if self.features == 'missing-only':\n features_diff_fit_trans = np.setdiff1d(features, self.features_)\n if self.error_on_new and features_diff_fit_trans.size > 0:\n raise ValueError('The features {} have missing values in transform but have no missing values in fit.'.format(features_diff_fit_trans))\n if self.features_.size < self._n_features:\n imputer_mask = imputer_mask[:, self.features_]\n return imputer_mask", + "docstring": "Generate missing values indicator for . Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data to complete. Returns ------- Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_features_with_missing) The missing indicator for input data. The data type of will be boolean.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\impute\\_base.py", + "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call If Assign Call If BoolOp Call Compare Raise Call Assign Call If Compare Assign Call If BoolOp Compare Raise Call Call If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_CheckpointFilename", + "source_code": "def _CheckpointFilename(self, p):\n name, _ = p\n return name", + "docstring": "Returns the checkpoint filename given a pair. Args: p: (filename, time) pair. 
Returns: Checkpoint file name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", + "ast_data": "FunctionDef name:_CheckpointFilename arg:self arg:p arguments arg arg Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_offsets", + "source_code": "def get_offsets(self):\n return np.zeros((1, 2)) if self._offsets is None else self._offsets", + "docstring": "Return the offsets for the collection.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:get_offsets arg:self arguments arg Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "_normalize_feature_columns", + "source_code": "def _normalize_feature_columns(feature_columns):\n if isinstance(feature_columns, _FeatureColumn):\n feature_columns = [feature_columns]\n if isinstance(feature_columns, collections_abc.Iterator):\n feature_columns = list(feature_columns)\n if isinstance(feature_columns, dict):\n raise ValueError('Expected feature_columns to be iterable, found dict.')\n for column in feature_columns:\n if not isinstance(column, _FeatureColumn):\n raise ValueError('Items of feature_columns must be a _FeatureColumn. Given (type {}): {}.'.format(type(column), column))\n if not feature_columns:\n raise ValueError('feature_columns must not be empty.')\n name_to_column = {}\n for column in feature_columns:\n if column.name in name_to_column:\n raise ValueError('Duplicate feature column name found for columns: {} and {}. This usually means that these columns refer to same base feature. Either one must be discarded or a duplicated but renamed item must be inserted in features dict.'.format(column, name_to_column[column.name]))\n name_to_column[column.name] = column\n return feature_columns", + "docstring": "Normalizes the input. This method converts the to list type as best as it can. In addition, verifies the type and other parts of feature_columns, required by downstream library. Args: feature_columns: The raw feature columns, usually passed by users. Returns: The normalized feature column list. Raises: ValueError: for any invalid inputs, such as empty, duplicated names, etc.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_normalize_feature_columns arg:feature_columns arguments arg If Call Assign If Call Assign Call If Call Raise Call For If Call Raise Call Call Call If Raise Call Assign For If Compare Raise Call Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "_calc_oa_lens", + "source_code": "def _calc_oa_lens(s1, s2):\n fallback = (s1 + s2 - 1, None, s1, s2)\n if s1 == s2 or s1 == 1 or s2 == 1:\n return fallback\n if s2 > s1:\n s1, s2 = (s2, s1)\n swapped = True\n else:\n swapped = False\n if s2 >= s1 / 2:\n return fallback\n overlap = s2 - 1\n opt_size = -overlap * lambertw(-1 / (2 * math.e * overlap), k=-1).real\n block_size = sp_fft.next_fast_len(math.ceil(opt_size))\n if block_size >= s1:\n return fallback\n if not swapped:\n in1_step = block_size - s2 + 1\n in2_step = s2\n else:\n in1_step = s2\n in2_step = block_size - s2 + 1\n return (block_size, overlap, in1_step, in2_step)", + "docstring": "Calculate the optimal FFT lengths for overlap-add convolution. The calculation is done for a single dimension. Parameters ---------- s1 : int Size of the dimension for the first array. s2 : int Size of the dimension for the second array. 
Returns ------- block_size : int The size of the FFT blocks. overlap : int The amount of overlap between two blocks. in1_step : int The size of each step for the first array. in2_step : int The size of each step for the first array.", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_signaltools.py", + "ast_data": "FunctionDef name:_calc_oa_lens arg:s1 arg:s2 arguments arg arg Assign If BoolOp Compare Compare Compare Return return:yes If Compare Assign Assign Assign If Compare Return return:yes Assign Assign Call Assign Call Call If Compare Return return:yes If Assign Assign Assign Assign Return return:yes" + }, + { + "library": "kornia", + "name": "depth_to_3d_v2", + "source_code": "def depth_to_3d_v2(depth: Tensor, camera_matrix: Tensor, normalize_points: bool=False, xyz_grid: Optional[Tensor]=None) -> Tensor:\n KORNIA_CHECK_SHAPE(depth, ['*', 'H', 'W'])\n KORNIA_CHECK_SHAPE(camera_matrix, ['*', '3', '3'])\n height, width = depth.shape[-2:]\n points_xyz: Tensor = xyz_grid or unproject_meshgrid(height, width, camera_matrix, normalize_points, depth.device, depth.dtype)\n KORNIA_CHECK_SHAPE(points_xyz, ['*', 'H', 'W', '3'])\n return points_xyz * depth[..., None]", + "docstring": "Compute a 3d point per pixel given its depth value and the camera intrinsics. .. note:: This is an alternative implementation of :py:func: that does not require the creation of a meshgrid. Args: depth: image tensor containing a depth value per pixel with shape :math:. camera_matrix: tensor containing the camera intrinsics with shape :math:. normalize_points: whether to normalise the pointcloud. This must be set to when the depth is represented as the Euclidean ray length from the camera position. xyz_grid: explicit xyz point values. Return: tensor with a 3d point per pixel of the same resolution as the input :math:. 
Example: >>> depth = torch.rand(4, 4) >>> K = torch.eye(3).repeat(2,1,1) >>> depth_to_3d_v2(depth, K).shape torch.Size([2, 4, 4, 3])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\depth.py", + "ast_data": "FunctionDef name:depth_to_3d_v2 arg:depth arg:camera_matrix arg:normalize_points arg:xyz_grid arguments arg arg arg arg Call Call Assign BoolOp Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "no_broadcast_dim_with_index", + "source_code": "def no_broadcast_dim_with_index(d1: list[DVar], d2: list[DVar], d3: list[DVar], d4: list[DVar], i: int):\n return Conj([Disj([Conj([BinConstraintD(d1[i], 1, op_eq), BinConstraintD(d2[i], 1, op_eq)]), Conj([BinConstraintD(d1[i], 1, op_neq), BinConstraintD(d2[i], 1, op_neq)])]), BinConstraintD(d1[i], d3[i], op_eq), BinConstraintD(d2[i], d4[i], op_eq)])", + "docstring": "Args: d1: input 1 d2: input 2 d3: simulated broadcasting for input 1 d4: simulated broadcasting for input 2 i: the rank of the resulting tensor addition Returns: Constraints for when no broadcasting occurs", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", + "ast_data": "FunctionDef name:no_broadcast_dim_with_index arg:d1 arg:d2 arg:d3 arg:d4 arg:i arguments arg arg arg arg arg Return return:yes Call Call Call Call Call Call Call Call Call Call" + }, + { + "library": "django", + "name": "formatted_description", + "source_code": "def formatted_description(self):\n description = self.describe()\n if self.category is None:\n return f'{OperationCategory.MIXED.value} {description}'\n return f'{self.category.value} {description}'", + "docstring": "Output a description prefixed by a category symbol.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\operations\\base.py", + "ast_data": "FunctionDef name:formatted_description arg:self arguments arg Assign Call If Compare Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "format_for_duration_arithmetic", + "source_code": "def format_for_duration_arithmetic(self, sql):\n return sql", + "docstring": "Do nothing since formatting is handled in the custom function.", + "type": "method", + "file_path": "django\\django\\db\\backends\\sqlite3\\operations.py", + "ast_data": "FunctionDef name:format_for_duration_arithmetic arg:self arg:sql arguments arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "PolynomialLR", + "source_code": "class PolynomialLR(LRScheduler):\n\n def __init__(self, optimizer: Optimizer, total_iters: int=5, power: float=1.0, last_epoch: int=-1) -> None:\n self.total_iters = total_iters\n self.power = power\n super().__init__(optimizer, last_epoch)\n\n @override\n def get_lr(self) -> list[float]:\n _warn_get_lr_called_within_step(self)\n if self._is_initial or self.last_epoch > self.total_iters:\n return [group['lr'] for group in self.optimizer.param_groups]\n decay_factor = ((1.0 - self.last_epoch / self.total_iters) / (1.0 - (self.last_epoch - 1) / self.total_iters)) ** self.power\n return [group['lr'] * decay_factor for group in self.optimizer.param_groups]\n\n def _get_closed_form_lr(self):\n return [base_lr * (1.0 - min(self.total_iters, self.last_epoch) / self.total_iters) ** self.power for base_lr in self.base_lrs]", + "docstring": "Decays the learning rate of each parameter group using a polynomial function in the given total_iters. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. 
total_iters (int): The number of steps that the scheduler decays the learning rate. Default: 5. power (float): The power of the polynomial. Default: 1.0. Example: >>> # xdoctest: +SKIP(\"undefined vars\") >>> # Assuming optimizer uses lr = 0.05 for all groups >>> # lr = 0.0490 if epoch == 0 >>> # lr = 0.0481 if epoch == 1 >>> # lr = 0.0472 if epoch == 2 >>> # ... >>> # lr = 0.0 if epoch >= 50 >>> scheduler = PolynomialLR(optimizer, total_iters=50, power=0.9) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() .. image:: ../scripts/lr_scheduler_images/PolynomialLR.png", + "type": "class", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "ClassDef name:PolynomialLR FunctionDef name:__init__ arg:self arg:optimizer arg:total_iters arg:power arg:last_epoch arguments arg arg arg arg arg Assign Assign Call Call FunctionDef name:get_lr arg:self arguments arg Call If BoolOp Compare Return return:yes Assign Return return:yes FunctionDef name:_get_closed_form_lr arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_save_cached_when_graph_building", + "source_code": "def _save_cached_when_graph_building(self, file_prefix, object_graph_tensor, options, update_ckpt_state=False):\n named_saveable_objects, graph_proto, feed_additions, unused_registered_savers = self._gather_saveables(object_graph_tensor=object_graph_tensor)\n if self._last_save_object_graph != graph_proto or context.executing_eagerly() or ops.inside_function():\n saver = _DSaver(self._mesh, named_saveable_objects)\n save_op = saver.save(file_prefix, options=options)\n with ops.device('/cpu:0'):\n with ops.control_dependencies([save_op]):\n self._cached_save_operation = array_ops.identity(file_prefix)\n self._last_save_object_graph = graph_proto\n return (self._cached_save_operation, feed_additions)", + "docstring": "Create or retrieve save ops, overrides parents's private method. Args: file_prefix: The prefix for saved checkpoint files. object_graph_tensor: A to which the current object graph will be fed. options: object. update_ckpt_state: Optional bool flag. Indiciate whether the internal checkpoint state needs to be updated. This is used for async checkpoint, which DTrackableSaver currently does not support. TODO(chienchunh): Implement async checkpoint for DTrackableSaver. Returns: A two-element tuple with a filename tensor and a feed_dict of tensors to feed when running it (if graph building). The feed dict contains the current object graph and any Python state to be saved in the checkpoint. 
When executing eagerly only the first argument is meaningful.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\d_checkpoint.py", + "ast_data": "FunctionDef name:_save_cached_when_graph_building arg:self arg:file_prefix arg:object_graph_tensor arg:options arg:update_ckpt_state arguments arg arg arg arg arg Assign Call If BoolOp Compare Call Call Assign Call Assign Call With Call With Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_default_getter", + "source_code": "def _default_getter(name, shape, dtype, initializer=None, partition_info=None, **kwargs):\n dtype = dtypes.as_dtype(dtype)\n shape_object = tensor_shape.as_shape(shape)\n with ops.init_scope():\n if initializer is None:\n initializer, initializing_from_value = variable_scope._get_default_variable_store()._get_default_initializer(name=name, shape=shape_object, dtype=dtype)\n else:\n initializing_from_value = not callable(initializer)\n variable_dtype = dtype.base_dtype\n if initializing_from_value:\n if shape is not None:\n raise ValueError('If initializer is a constant, do not specify shape.')\n initial_value = initializer\n else:\n if isinstance(initializer, type(init_ops.Initializer)):\n initializer = initializer(dtype=dtype)\n shape_list = None if shape is None else shape_object.as_list()\n if 'partition_info' in tf_inspect.getargspec(initializer).args:\n initial_value = functools.partial(initializer, shape_list, dtype=dtype, partition_info=partition_info)\n else:\n initial_value = functools.partial(initializer, shape_list, dtype=dtype)\n return variable_v1.VariableV1(initial_value=initial_value, name=name, dtype=variable_dtype, use_resource=True, **kwargs)", + "docstring": "A pared-down version of get_variable which does not reuse variables.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:_default_getter arg:name arg:shape arg:dtype arg:initializer arg:partition_info arguments arg arg arg arg arg arg Assign Call Assign Call With Call If Compare Assign Call Call Assign Call Assign If If Compare Raise Call Assign If Call Call Assign Call Assign Compare Call If Compare Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "scale_factory", + "source_code": "def scale_factory(scale, axis, **kwargs):\n scale_cls = _api.check_getitem(_scale_mapping, scale=scale)\n return scale_cls(axis, **kwargs)", + "docstring": "Return a scale class by name. 
Parameters ---------- scale : {%(names)s} axis :", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\scale.py", + "ast_data": "FunctionDef name:scale_factory arg:scale arg:axis arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "partial_run_setup", + "source_code": "def partial_run_setup(self, fetches, feeds=None):\n raise NotImplementedError('partial_run_setup')", + "docstring": "Sets up the feeds and fetches for partial runs in the session.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", + "ast_data": "FunctionDef name:partial_run_setup arg:self arg:fetches arg:feeds arguments arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "all_gather_v2", + "source_code": "def all_gather_v2(t, group_size, group_key, instance_key, communication_hint='auto', timeout=0, ordering_token=None, name=None):\n if ordering_token is not None:\n ordering_token = [ordering_token]\n else:\n ordering_token = []\n return gen_collective_ops.collective_gather_v2(t, group_size=group_size, group_key=group_key, instance_key=instance_key, communication_hint=communication_hint.lower(), timeout_seconds=timeout, is_stateless=False, ordering_token=ordering_token, name=name)", + "docstring": "Accumulates tensors collectively, across devices, along first dimension. Args: t: the tensor to participate in the accumulation. group_size: an int32 tensor, the total number of tensors to be collectively accumulated. Each must reside on a different device. Should be a positive integer. group_key: an int32 tensor identifying the group of devices. instance_key: an int32 tensor identifying the participating group of Ops. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include , , and . timeout: a float. If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. ordering_token: a resource tensor on the same device as the op to order the collectives in a per-device manner by auto control dependency. This argument can be omited when there is one collective Op per , or when explicit control dependency is used instead of auto control dependency. name: name of the Op. Returns: An Op implementing the distributed operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py", + "ast_data": "FunctionDef name:all_gather_v2 arg:t arg:group_size arg:group_key arg:instance_key arg:communication_hint arg:timeout arg:ordering_token arg:name arguments arg arg arg arg arg arg arg arg If Compare Assign Assign Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "SymLogNorm", + "source_code": "@make_norm_from_scale(scale.SymmetricalLogScale, init=lambda linthresh, linscale=1.0, vmin=None, vmax=None, clip=False, *, base=10: None)\nclass SymLogNorm(Normalize):\n\n @property\n def linthresh(self):\n return self._scale.linthresh\n\n @linthresh.setter\n def linthresh(self, value):\n self._scale.linthresh = value", + "docstring": "The symmetrical logarithmic scale is logarithmic in both the positive and negative directions from the origin. Since the values close to zero tend toward infinity, there is a need to have a range around zero that is linear. The parameter *linthresh* allows the user to specify the size of this range (-*linthresh*, *linthresh*). 
Parameters ---------- linthresh : float The range within which the plot is linear (to avoid having the plot go to infinity around zero). linscale : float, default: 1 This allows the linear range (-*linthresh* to *linthresh*) to be stretched relative to the logarithmic range. Its value is the number of decades to use for each half of the linear range. For example, when *linscale* == 1.0 (the default), the space used for the positive and negative halves of the linear range will be equal to one decade in the logarithmic range. base : float, default: 10", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "ClassDef name:SymLogNorm FunctionDef name:linthresh arg:self arguments arg Return return:yes FunctionDef name:linthresh arg:self arg:value arguments arg arg Assign Call arguments arg arg arg arg arg arg" + }, + { + "library": "django", + "name": "savepoint_commit", + "source_code": "@async_unsafe\ndef savepoint_commit(self, sid):\n if not self._savepoint_allowed():\n return\n self.validate_thread_sharing()\n self._savepoint_commit(sid)", + "docstring": "Release a savepoint. Do nothing if savepoints are not supported.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\base.py", + "ast_data": "FunctionDef name:savepoint_commit arg:self arg:sid arguments arg arg If Call Return return:no Call Call" + }, + { + "library": "django", + "name": "localize_input", + "source_code": "def localize_input(value, default=None):\n if isinstance(value, str):\n return value\n elif isinstance(value, bool):\n return str(value)\n elif isinstance(value, (decimal.Decimal, float, int)):\n return number_format(value)\n elif isinstance(value, datetime.datetime):\n format = default or get_format('DATETIME_INPUT_FORMATS')[0]\n format = sanitize_strftime_format(format)\n return value.strftime(format)\n elif isinstance(value, datetime.date):\n format = default or get_format('DATE_INPUT_FORMATS')[0]\n format = sanitize_strftime_format(format)\n return value.strftime(format)\n elif isinstance(value, datetime.time):\n format = default or get_format('TIME_INPUT_FORMATS')[0]\n return value.strftime(format)\n return value", + "docstring": "Check if an input value is a localizable type and return it formatted with the appropriate formatting string of the current locale.", + "type": "function", + "file_path": "django\\django\\utils\\formats.py", + "ast_data": "FunctionDef name:localize_input arg:value arg:default arguments arg arg If Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call If Call Assign BoolOp Call Assign Call Return return:yes Call If Call Assign BoolOp Call Assign Call Return return:yes Call If Call Assign BoolOp Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_fftn", + "source_code": "def _fftn(input_tensor, fft_length=None, axes=None, norm=None, name=None):\n with _ops.name_scope(name, default_name, [input_tensor, fft_length, axes]) as name:\n axes = _process_empty_axes(input_tensor, axes)\n fft_rank = axes.shape[0]\n input_tensor = _ops.convert_to_tensor(input_tensor, preferred_dtype=_dtypes.complex64)\n input_tensor.shape.with_rank_at_least(fft_rank)\n if fft_length is None:\n fft_length = _infer_fft_length_for_fftn(input_tensor)\n else:\n fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)\n input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)\n fft_length_static = _tensor_util.constant_value(fft_length)\n if fft_length_static is not None:\n 
fft_length = fft_length_static\n if norm is None:\n norm = 'backward'\n n = 1\n if norm != 'backward':\n for fft_length_i in fft_length:\n n *= fft_length_i\n if norm == 'forward':\n input_tensor /= n\n elif norm == 'ortho':\n input_tensor /= np.sqrt(n)\n return fft_n(input_tensor, fft_length, axes, name=name)", + "docstring": "Wrapper around gen_spectral_ops.*fft that infers fft_length and axes arguments.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py", + "ast_data": "FunctionDef name:_fftn arg:input_tensor arg:fft_length arg:axes arg:norm arg:name arguments arg arg arg arg arg With Call Assign Call Assign Assign Call Call If Compare Assign Call Assign Call Assign Call Assign Call If Compare Assign If Compare Assign Assign If Compare For If Compare If Compare Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__tf_tensor__", + "source_code": "def __tf_tensor__(self, dtype=None, name=None) -> NoReturn:\n raise TypeError(\"can't convert Operation '{}' to Tensor\".format(self.name))", + "docstring": "Raises a helpful error.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:__tf_tensor__ arg:self arg:dtype arg:name arguments arg arg arg Raise Call Call" + }, + { + "library": "scipy", + "name": "_matmat", + "source_code": "def _matmat(self, X):\n s, y, n_corrs, rho = (self.sk, self.yk, self.n_corrs, self.rho)\n Q = np.array(X, dtype=self.dtype, copy=True)\n alpha = np.empty((n_corrs, Q.shape[1]))\n for i in range(n_corrs - 1, -1, -1):\n alpha[i] = rho[i] * np.dot(s[i], Q)\n Q -= alpha[i] * y[i][:, np.newaxis]\n R = Q\n for i in range(n_corrs):\n beta = rho[i] * np.dot(y[i], R)\n R += s[i][:, np.newaxis] * (alpha[i] - beta)\n return R", + "docstring": "Efficient matrix-matrix multiply with the BFGS matrices. This calculation is described in Section (4) of [1]. Parameters ---------- X : ndarray An array with shape (n,m) Returns ------- Y : ndarray The matrix-matrix product Notes ----- This implementation is written starting from _matvec and broadcasting all expressions along the second axis of X.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_lbfgsb_py.py", + "ast_data": "FunctionDef name:_matmat arg:self arg:X arguments arg arg Assign Assign Call Assign Call For Call Assign Call Assign For Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "shareview", + "source_code": "def shareview(self, other):\n _api.check_isinstance(Axes3D, other=other)\n if self._shareview is not None and other is not self._shareview:\n raise ValueError('view angles are already shared')\n self._shared_axes['view'].join(self, other)\n self._shareview = other\n vertical_axis = self._axis_names[other._vertical_axis]\n self.view_init(elev=other.elev, azim=other.azim, roll=other.roll, vertical_axis=vertical_axis, share=True)", + "docstring": "Share the view angles with *other*. This is equivalent to passing `` when constructing the Axes, and cannot be used if the view angles are already being shared with another Axes. 
Note that it is not possible to unshare axes.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:shareview arg:self arg:other arguments arg arg Call If BoolOp Compare Compare Raise Call Call Assign Assign Call" + }, + { + "library": "scipy", + "name": "_chunk_evaluator", + "source_code": "def _chunk_evaluator(self, x, y, shift, scale, coeffs, memory_budget=1000000):\n nx, ndim = x.shape\n if self.neighbors is None:\n nnei = len(y)\n else:\n nnei = self.neighbors\n chunksize = memory_budget // (self.powers.shape[0] + nnei) + 1\n if chunksize <= nx:\n out = np.empty((nx, self.d.shape[1]), dtype=float)\n for i in range(0, nx, chunksize):\n vec = _build_evaluation_coefficients(x[i:i + chunksize, :], y, self.kernel, self.epsilon, self.powers, shift, scale)\n out[i:i + chunksize, :] = np.dot(vec, coeffs)\n else:\n vec = _build_evaluation_coefficients(x, y, self.kernel, self.epsilon, self.powers, shift, scale)\n out = np.dot(vec, coeffs)\n return out", + "docstring": "Evaluate the interpolation while controlling memory consumption. We chunk the input if we need more memory than specified. Parameters ---------- x : (Q, N) float ndarray array of points on which to evaluate y: (P, N) float ndarray array of points on which we know function values shift: (N, ) ndarray Domain shift used to create the polynomial matrix. scale : (N,) float ndarray Domain scaling used to create the polynomial matrix. coeffs: (P+R, S) float ndarray Coefficients in front of basis functions memory_budget: int Total amount of memory (in units of sizeof(float)) we wish to devote for storing the array of coefficients for interpolated points. If we need more memory than that, we chunk the input. Returns ------- (Q, S) float ndarray Interpolated array", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_rbfinterp.py", + "ast_data": "FunctionDef name:_chunk_evaluator arg:self arg:x arg:y arg:shift arg:scale arg:coeffs arg:memory_budget arguments arg arg arg arg arg arg arg Assign If Compare Assign Call Assign Assign If Compare Assign Call For Call Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "saved_model", + "source_code": "@property\ndef saved_model(self):\n return self._saved_model", + "docstring": "SavedModel object parsed from the export directory.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py", + "ast_data": "FunctionDef name:saved_model arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "translate", + "source_code": "def translate(self, size: Tensor, method: str='warp', inplace: bool=False) -> Boxes:\n if method == 'fast':\n raise NotImplementedError\n elif method == 'warp':\n pass\n else:\n raise NotImplementedError\n M: Tensor = eye_like(3, size)\n M[:, :2, 2] = size\n return self.transform_boxes(M, inplace=inplace)", + "docstring": "Translate boxes by the provided size. Args: size: translate size for x, y direction, shape of :math:. method: \"warp\" or \"fast\". inplace: do transform in-place and return self. 
Returns: The transformed boxes.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\boxes.py", + "ast_data": "FunctionDef name:translate arg:self arg:size arg:method arg:inplace arguments arg arg arg arg If Compare Raise If Compare Raise Call Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "poles", + "source_code": "def poles(self):\n if self._poles is None:\n m = self.weights.size\n B = np.eye(m + 1, dtype=self.weights.dtype)\n B[0, 0] = 0\n E = np.zeros_like(B, dtype=np.result_type(self.weights, self._support_points))\n E[0, 1:] = self.weights\n E[1:, 0] = 1\n np.fill_diagonal(E[1:, 1:], self._support_points)\n pol = scipy.linalg.eigvals(E, B)\n self._poles = pol[np.isfinite(pol)]\n return self._poles", + "docstring": "Compute the poles of the rational approximation. Returns ------- poles : array Poles of the AAA approximation, repeated according to their multiplicity but not in any specific order.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_bary_rational.py", + "ast_data": "FunctionDef name:poles arg:self arguments arg If Compare Assign Assign Call Assign Assign Call Call Assign Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "row_splits_to_segment_ids", + "source_code": "@tf_export('ragged.row_splits_to_segment_ids')\n@dispatch.add_dispatch_support\ndef row_splits_to_segment_ids(splits, name=None, out_type=None):\n with ops.name_scope(name, 'RaggedSplitsToSegmentIds', [splits]) as name:\n splits = ops.convert_to_tensor(splits, name='splits', preferred_dtype=dtypes.int64)\n if splits.dtype not in (dtypes.int32, dtypes.int64):\n raise ValueError('splits must have dtype int32 or int64')\n splits.shape.assert_has_rank(1)\n if tensor_shape.dimension_value(splits.shape[0]) == 0:\n raise ValueError('Invalid row_splits: []')\n if out_type is None:\n out_type = splits.dtype\n else:\n out_type = dtypes.as_dtype(out_type)\n row_lengths = splits[1:] - splits[:-1]\n nrows = array_ops.shape(splits, out_type=out_type)[-1] - 1\n indices = math_ops.range(nrows)\n return ragged_util.repeat(indices, repeats=row_lengths, axis=0)", + "docstring": "Generates the segmentation corresponding to a RaggedTensor . Returns an integer vector , where if splits[0]splits.dtypetf.int64splitsshape=[splits[-1]]splits` is invalid.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\segment_id_ops.py", + "ast_data": "FunctionDef name:row_splits_to_segment_ids arg:splits arg:name arg:out_type arguments arg arg arg With Call Assign Call If Compare Raise Call Call If Compare Call Raise Call If Compare Assign Assign Call Assign Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "value_to_string", + "source_code": "def value_to_string(self, obj):\n return str(self.value_from_object(obj))", + "docstring": "Return a string value of this field from the passed obj. 
This is used by the serialization framework.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "FunctionDef name:value_to_string arg:self arg:obj arguments arg arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "Problem14", + "source_code": "class Problem14(Benchmark):\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n self._bounds = [(0.0, 4.0)]\n self.global_optimum = 0.224885\n self.fglob = -0.788685\n\n def fun(self, x, *args):\n self.nfev += 1\n x = x[0]\n return -exp(-x) * sin(2.0 * pi * x)", + "docstring": "Univariate Problem14 objective function. This class defines the Univariate Problem14 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem14}}(x) = -e^{-x} \\sin(2\\pi x) Bound constraints: :math: .. figure:: figures/Problem14.png :alt: Univariate Problem14 function :align: center **Univariate Problem14 function** *Global optimum*: :math: for :math:", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py", + "ast_data": "ClassDef name:Problem14 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_get_concat_axis_dataframe", + "source_code": "def _get_concat_axis_dataframe(objs: list[Series | DataFrame], axis: AxisInt, ignore_index: bool, keys: Iterable[Hashable] | None, names: list[HashableT] | None, levels, verify_integrity: bool) -> Index:\n indexes_gen = (x.axes[axis] for x in objs)\n if ignore_index:\n return default_index(sum((len(i) for i in indexes_gen)))\n else:\n indexes = list(indexes_gen)\n if keys is None:\n if levels is not None:\n raise ValueError('levels supported only when keys is not None')\n concat_axis = _concat_indexes(indexes)\n else:\n concat_axis = _make_concat_multiindex(indexes, keys, levels, names)\n if verify_integrity and (not concat_axis.is_unique):\n overlap = concat_axis[concat_axis.duplicated()].unique()\n raise ValueError(f'Indexes have overlapping values: {overlap}')\n return concat_axis", + "docstring": "Return result concat axis when concatenating DataFrame objects.", + "type": "function", + "file_path": "pandas\\pandas\\core\\reshape\\concat.py", + "ast_data": "FunctionDef name:_get_concat_axis_dataframe arg:objs arg:axis arg:ignore_index arg:keys arg:names arg:levels arg:verify_integrity arguments arg arg arg arg arg arg arg Assign If Return return:yes Call Call Call Assign Call If Compare If Compare Raise Call Assign Call Assign Call If BoolOp Assign Call Call Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "StackTraceTransform", + "source_code": "class StackTraceTransform(object):\n _stack_dict = None\n _thread_key = None\n\n def __enter__(self):\n if self._thread_key is None:\n self._thread_key = _get_thread_key()\n else:\n assert self._thread_key == _get_thread_key(), 'Shared across threads?'\n stack = self._stack_dict[self._thread_key]\n self.parent = stack[-1]\n stack.append(self)\n self.update()\n return self\n\n def __exit__(self, unused_type, unused_value, unused_traceback):\n top = self._stack_dict[self._thread_key].pop()\n assert top is self, 'Concurrent access?'\n\n def update(self):\n raise NotImplementedError('subclasses need to override this')", + "docstring": "Base class for stack trace transformation 
functions.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_stack.py", + "ast_data": "ClassDef name:StackTraceTransform Assign Assign FunctionDef name:__enter__ arg:self arguments arg If Compare Assign Call Compare Call Assign Assign Call Call Return return:yes FunctionDef name:__exit__ arg:self arg:unused_type arg:unused_value arg:unused_traceback arguments arg arg arg arg Assign Call Compare FunctionDef name:update arg:self arguments arg Raise Call" + }, + { + "library": "django", + "name": "_check_autocomplete_fields", + "source_code": "def _check_autocomplete_fields(self, obj):\n if not isinstance(obj.autocomplete_fields, (list, tuple)):\n return must_be('a list or tuple', option='autocomplete_fields', obj=obj, id='admin.E036')\n else:\n return list(chain.from_iterable([self._check_autocomplete_fields_item(obj, field_name, 'autocomplete_fields[%d]' % index) for index, field_name in enumerate(obj.autocomplete_fields)]))", + "docstring": "Check that is a list or tuple of model fields.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\checks.py", + "ast_data": "FunctionDef name:_check_autocomplete_fields arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:yes Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "get_transform", + "source_code": "def get_transform(self):\n return IdentityTransform()", + "docstring": "Return the transform for linear scaling, which is just the .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\scale.py", + "ast_data": "FunctionDef name:get_transform arg:self arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "_torch_solve_cast", + "source_code": "def _torch_solve_cast(A: Tensor, B: Tensor) -> Tensor:\n if is_mps_tensor_safe(A):\n dtype = torch.float32\n else:\n dtype = torch.float64\n out = torch.linalg.solve(A.to(dtype), B.to(dtype))\n return out.to(A.dtype)", + "docstring": "Make torch.solve work with other than fp32/64. For stable operation, the input matrices should be cast to fp64, and the output will be cast back to the input dtype. However, fp64 is not yet supported on MPS.", + "type": "function", + "file_path": "kornia\\kornia\\utils\\helpers.py", + "ast_data": "FunctionDef name:_torch_solve_cast arg:A arg:B arguments arg arg If Call Assign Assign Assign Call Call Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "_copyto", + "source_code": "def _copyto(a, val, mask):\n if isinstance(a, np.ndarray):\n np.copyto(a, val, where=mask, casting='unsafe')\n else:\n a = a.dtype.type(val)\n return a", + "docstring": "Replace values in with NaN where is True. This differs from copyto in that it will deal with the case where is a numpy scalar. Parameters ---------- a : ndarray or numpy scalar Array or numpy scalar some of whose values are to be replaced by val. val : numpy scalar Value used a replacement. mask : ndarray, scalar Boolean array. Where True the corresponding element of is replaced by . Broadcasts. 
Returns ------- res : ndarray, scalar Array with elements replaced or scalar .", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_nanfunctions_impl.py", + "ast_data": "FunctionDef name:_copyto arg:a arg:val arg:mask arguments arg arg arg If Call Call Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "_pad_simple", + "source_code": "def _pad_simple(array, pad_width, fill_value=None):\n new_shape = tuple((left + size + right for size, (left, right) in zip(array.shape, pad_width)))\n order = 'F' if array.flags.fnc else 'C'\n padded = np.empty(new_shape, dtype=array.dtype, order=order)\n if fill_value is not None:\n padded.fill(fill_value)\n original_area_slice = tuple((slice(left, left + size) for size, (left, right) in zip(array.shape, pad_width)))\n padded[original_area_slice] = array\n return (padded, original_area_slice)", + "docstring": "Pad array on all sides with either a single value or undefined values. Parameters ---------- array : ndarray Array to grow. pad_width : sequence of tuple[int, int] Pad width on both sides for each dimension in . fill_value : scalar, optional If provided the padded area is filled with this value, otherwise the pad area left undefined. Returns ------- padded : ndarray The padded array with the same dtype as. Its order will default to C-style if is not F-contiguous. original_area_slice : tuple A tuple of slices pointing to the area of the original array.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_arraypad_impl.py", + "ast_data": "FunctionDef name:_pad_simple arg:array arg:pad_width arg:fill_value arguments arg arg arg Assign Call Call Assign Assign Call If Compare Call Assign Call Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_without_context", + "source_code": "def _without_context(node, lines, minl, maxl):\n for n in gast.walk(node):\n lineno = getattr(n, 'lineno', None)\n if lineno is not None:\n n.lineno = lineno - minl\n end_lineno = getattr(n, 'end_lineno', None)\n if end_lineno is not None:\n n.end_lineno = end_lineno - minl\n code_lines = lines[minl - 1:maxl]\n end_col_offset = getattr(node, 'end_col_offset', None)\n if end_col_offset is not None:\n code_lines[-1] = code_lines[-1][:end_col_offset]\n col_offset = getattr(node, 'col_offset', None)\n if col_offset is None:\n match = re.search('(?'", + "docstring": "Guess byte order. 
Sets stream pointer to 0", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py", + "ast_data": "FunctionDef name:guess_byte_order arg:self arguments arg Call Assign Call Call Return return:yes BoolOp BoolOp Compare" + }, + { + "library": "tensorflow", + "name": "_SessionConverterData", + "source_code": "class _SessionConverterData(_ConverterData):\n\n def __init__(self, session, graph_def, output_node_names, variable_names_allowlist=None, variable_names_denylist=None):\n graph_def = graph_util.extract_sub_graph(graph_def, output_node_names)\n super(_SessionConverterData, self).__init__(graph_def, variable_names_allowlist=variable_names_allowlist, variable_names_denylist=variable_names_denylist)\n nodes_to_convert = []\n tensor_names_to_convert = []\n for node in self.graph_def.node:\n if node.op in ['Variable', 'VariableV2', 'VarHandleOp']:\n tensor_name = node.name\n if not self._should_convert(tensor_name):\n continue\n if node.op == 'VarHandleOp':\n tensor_name = tensor_name + '/Read/ReadVariableOp'\n nodes_to_convert.append(node)\n tensor_names_to_convert.append(tensor_name + ':0')\n if tensor_names_to_convert:\n converted_tensors = session.run(tensor_names_to_convert)\n for node, tensor_value in zip(nodes_to_convert, converted_tensors):\n self._tensor_data[node.name] = _TensorData(numpy=tensor_value, dtype=node.attr['dtype'].type, index=None)", + "docstring": "Container for Session-based conversion data.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", + "ast_data": "ClassDef name:_SessionConverterData FunctionDef name:__init__ arg:self arg:session arg:graph_def arg:output_node_names arg:variable_names_allowlist arg:variable_names_denylist arguments arg arg arg arg arg arg Assign Call Call Call Assign Assign For If Compare Assign If Call If Compare Assign Call Call If Assign Call For Call Assign Call" + }, + { + "library": "tensorflow", + "name": "_BesselI1eGrad", + "source_code": "@ops.RegisterGradient('BesselI1e')\ndef _BesselI1eGrad(op: ops.Operation, grad):\n x = op.inputs[0]\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n dy_dx = array_ops.where_v2(math_ops.equal(x, 0.0), math_ops.cast(0.5, x.dtype), special_math_ops.bessel_i0e(x) - y * (math_ops.sign(x) + math_ops.reciprocal(x)))\n return grad * dy_dx", + "docstring": "Compute gradient of bessel_i1e(x) with respect to its argument.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_BesselI1eGrad arg:op arg:grad arguments arg arg Assign Assign With Call Assign Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_get_quant_params", + "source_code": "def _get_quant_params(tensor_detail: Mapping[str, Any]) -> Optional[Tuple[float, int]]:\n quant_params = tensor_detail['quantization_parameters']\n if not quant_params:\n return None\n if quant_params['scales'] and quant_params['zero_points']:\n return (quant_params['scales'][0], quant_params['zero_points'][0])\n return None", + "docstring": "Returns first scale and zero point from tensor detail, if present.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py", + "ast_data": "FunctionDef name:_get_quant_params arg:tensor_detail arguments arg Assign If Return return:no If BoolOp Return return:yes Return return:no" + }, + { + "library": "scikit-learn", + "name": "score", + "source_code": "def score(self, X, 
y=None):\n check_is_fitted(self)\n X = self._check_non_neg_array(X, reset_n_features=False, whom='LatentDirichletAllocation.score')\n doc_topic_distr = self._unnormalized_transform(X)\n score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)\n return score", + "docstring": "Calculate approximate log-likelihood as score. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Document word matrix. y : Ignored Not used, present here for API consistency by convention. Returns ------- score : float Use approximate bound as score.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py", + "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "deserialize_feature_column", + "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('__internal__.feature_column.deserialize_feature_column', v1=[])\ndef deserialize_feature_column(config, custom_objects=None, columns_by_name=None):\n if isinstance(config, six.string_types):\n return config\n module_feature_column_classes = {cls.__name__: cls for cls in _FEATURE_COLUMNS}\n if columns_by_name is None:\n columns_by_name = {}\n cls, cls_config = _class_and_config_for_serialized_keras_object(config, module_objects=module_feature_column_classes, custom_objects=custom_objects, printable_module_name='feature_column_v2')\n if not issubclass(cls, fc_types.FeatureColumn):\n raise ValueError('Expected FeatureColumn class, instead found: {}'.format(cls))\n new_instance = cls.from_config(cls_config, custom_objects=custom_objects, columns_by_name=columns_by_name)\n return columns_by_name.setdefault(_column_name_with_class_name(new_instance), new_instance)", + "docstring": "Deserializes a generated with . This method should only be used to deserialize parent FeatureColumns when implementing FeatureColumn.from_config(), else deserialize_feature_columns() is preferable. Returns a FeatureColumn for this config. Args: config: A Dict with the serialization of feature columns acquired by , or a string representing a raw column. custom_objects: A Dict from custom_object name to the associated keras serializable objects (FeatureColumns, classes or functions). columns_by_name: A Dict[String, FeatureColumn] of existing columns in order to avoid duplication. Raises: ValueError if has invalid format (e.g: expected keys missing, or refers to unknown classes). Returns: A FeatureColumn corresponding to the input .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\serialization.py", + "ast_data": "FunctionDef name:deserialize_feature_column arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg If Call Return return:yes Assign If Compare Assign Assign Call If Call Raise Call Call Assign Call Return return:yes Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "toolmanager_disconnect", + "source_code": "def toolmanager_disconnect(self, cid):\n return self._callbacks.disconnect(cid)", + "docstring": "Disconnect callback id *cid*. 
Example usage:: cid = toolmanager.toolmanager_connect('tool_trigger_zoom', onpress) #...later toolmanager.toolmanager_disconnect(cid)", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py", + "ast_data": "FunctionDef name:toolmanager_disconnect arg:self arg:cid arguments arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "__init__", + "source_code": "def __init__(self, model: Module, pre_processor: Module, post_processor: Module, name: Optional[str]=None) -> None:\n super().__init__()\n self.model = model.eval()\n self.pre_processor = pre_processor.eval()\n self.post_processor = post_processor.eval()\n if name is not None:\n self.name = name", + "docstring": "Construct an Object Detector object. Args: model: an object detection model. pre_processor: a pre-processing module post_processor: a post-processing module. name: name of a model.", + "type": "method", + "file_path": "kornia\\kornia\\models\\base.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:model arg:pre_processor arg:post_processor arg:name arguments arg arg arg arg arg Call Call Assign Call Assign Call Assign Call If Compare Assign" + }, + { + "library": "pytorch", + "name": "hardsigmoid", + "source_code": "def hardsigmoid(input: Tensor, inplace: bool=False) -> Tensor:\n if not input.is_quantized:\n raise ValueError(\"Input to 'quantized.hardsigmoid' must be quantized!\")\n if inplace:\n return torch._C._nn.hardsigmoid_(input)\n return torch._C._nn.hardsigmoid(input)", + "docstring": "This is the quantized version of :func:.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py", + "ast_data": "FunctionDef name:hardsigmoid arg:input arg:inplace arguments arg arg If Raise Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "django", + "name": "envelope", + "source_code": "@property\ndef envelope(self):\n return Envelope(capi.get_envelope(self.ptr, byref(OGREnvelope())))", + "docstring": "Return the envelope for this Geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:envelope arg:self arguments arg Return return:yes Call Call Call Call" + }, + { + "library": "django", + "name": "vary_on_headers", + "source_code": "def vary_on_headers(*headers):\n\n def decorator(func):\n if iscoroutinefunction(func):\n\n async def _view_wrapper(request, *args, **kwargs):\n response = await func(request, *args, **kwargs)\n patch_vary_headers(response, headers)\n return response\n else:\n\n def _view_wrapper(request, *args, **kwargs):\n response = func(request, *args, **kwargs)\n patch_vary_headers(response, headers)\n return response\n return wraps(func)(_view_wrapper)\n return decorator", + "docstring": "A view decorator that adds the specified headers to the Vary header of the response. Usage: @vary_on_headers('Cookie', 'Accept-language') def index(request): ... 
Note that the header names are not case-sensitive.", + "type": "function", + "file_path": "django\\django\\views\\decorators\\vary.py", + "ast_data": "FunctionDef name:vary_on_headers arguments arg FunctionDef name:decorator arg:func arguments arg If Call AsyncFunctionDef name:_view_wrapper arg:request arguments arg arg arg Assign Call Call Return return:yes FunctionDef name:_view_wrapper arg:request arguments arg arg arg Assign Call Call Return return:yes Return return:yes Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "forward_context", + "source_code": "@property\ndef forward_context(self):\n return self._forward_context", + "docstring": "The while loop context for forward.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py", + "ast_data": "FunctionDef name:forward_context arg:self arguments arg Return return:yes" + }, + { + "library": "numpy", + "name": "shape", + "source_code": "@property\ndef shape(self):\n return self.shape_as(_getintp_ctype())", + "docstring": "(c_intp*self.ndim): A ctypes array of length self.ndim where the basetype is the C-integer corresponding to `~numpy.ctypeslib.c_intpctypes.c_intctypes.c_longctypes.c_longlong` depending on the platform. The ctypes array contains the shape of the underlying array.", + "type": "method", + "file_path": "numpy\\numpy\\_core\\_internal.py", + "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "scatter_nd_update", + "source_code": "def scatter_nd_update(self, indices, updates, name=None):\n raise NotImplementedError", + "docstring": "Applies sparse assignment to individual values or slices in a Variable. The Variable has rank and is a of rank . must be integer tensor, containing indices into self. It must be shape where . The innermost dimension of (with length ) corresponds to indices into elements (if ) or slices (if ) along the th dimension of self. is of rank with shape: For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: The resulting update to v would look like this: [1, 11, 3, 10, 9, 6, 7, 12] See for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: The updated variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:scatter_nd_update arg:self arg:indices arg:updates arg:name arguments arg arg arg arg Raise" + }, + { + "library": "tensorflow", + "name": "flatten", + "source_code": "@doc_controls.do_not_doc_inheritable\ndef flatten(self):\n return super().flatten()", + "docstring": "See tf.types.experimental.TraceType base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", + "ast_data": "FunctionDef name:flatten arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, tensor_callable, dtype, device):\n super().__init__(tensor_callable, None, None, dtype, device)", + "docstring": "Initializes a object. Args: tensor_callable: A callable that takes no arguments and returns a Tensor. dtype: Dtype of the tensor returned by the callable. 
device: Device of the tensor returned by the callable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\tensor_callable.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:tensor_callable arg:dtype arg:device arguments arg arg arg arg Call Call" + }, + { + "library": "pytorch", + "name": "pre", + "source_code": "@property\ndef pre(self) -> Optional[Tuple[str, int]]:\n return self._version.pre", + "docstring": "The pre-release segment of the version. >>> print(Version(\"1.2.3\").pre) None >>> Version(\"1.2.3a1\").pre ('a', 1) >>> Version(\"1.2.3b1\").pre ('b', 1) >>> Version(\"1.2.3rc1\").pre ('rc', 1)", + "type": "method", + "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py", + "ast_data": "FunctionDef name:pre arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "min_max_variable_partitioner", + "source_code": "@tf_export(v1=['min_max_variable_partitioner'])\ndef min_max_variable_partitioner(max_partitions=1, axis=0, min_slice_size=256 << 10, bytes_per_string_element=16):\n\n def _partitioner(shape, dtype):\n if axis >= len(shape):\n raise ValueError(f'Cannot partition variable along axis {axis} when shape is only {shape}')\n dtype = dtypes.as_dtype(dtype)\n if dtype.base_dtype == dtypes.string:\n bytes_per_element = bytes_per_string_element\n else:\n bytes_per_element = dtype.size\n total_size_bytes = shape.num_elements() * bytes_per_element\n partitions = total_size_bytes / min_slice_size\n partitions_list = [1] * len(shape)\n partitions_list[axis] = max(1, min(shape.dims[axis].value, max_partitions, int(math.ceil(partitions))))\n return partitions_list\n return _partitioner", + "docstring": "Partitioner to allocate minimum size per slice. Returns a partitioner that partitions the variable of given shape and dtype such that each partition has a minimum of slice of the variable. The maximum number of such partitions (upper bound) is given by . Args: max_partitions: Upper bound on the number of partitions. Defaults to 1. axis: Axis along which to partition the variable. Defaults to 0. min_slice_size: Minimum size of the variable slice per partition. Defaults to 256K. bytes_per_string_element: If the is of type string, this provides an estimate of how large each scalar in the is. 
Returns: A partition function usable as the argument to and .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\partitioned_variables.py", + "ast_data": "FunctionDef name:min_max_variable_partitioner arg:max_partitions arg:axis arg:min_slice_size arg:bytes_per_string_element arguments arg arg arg arg FunctionDef name:_partitioner arg:shape arg:dtype arguments arg arg If Compare Call Raise Call Assign Call If Compare Assign Assign Assign Call Assign Assign Call Assign Call Call Call Call Return return:yes Return return:yes Call" + }, + { + "library": "pandas", + "name": "get_atom_coltype", + "source_code": "@classmethod\ndef get_atom_coltype(cls, kind: str) -> type[Col]:\n if kind.startswith('uint'):\n k4 = kind[4:]\n col_name = f'UInt{k4}Col'\n elif kind.startswith('period'):\n col_name = 'Int64Col'\n else:\n kcap = kind.capitalize()\n col_name = f'{kcap}Col'\n return getattr(_tables(), col_name)", + "docstring": "return the PyTables column class for this column", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:get_atom_coltype arg:cls arg:kind arguments arg arg If Call Assign Assign If Call Assign Assign Call Assign Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "predict_log_proba", + "source_code": "@available_if(_estimator_has('predict_log_proba'))\ndef predict_log_proba(self, X):\n check_is_fitted(self)\n return self.estimator_.predict_log_proba(self.transform(X))", + "docstring": "Predict class log-probabilities for X. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. Returns ------- p : array of shape (n_samples, n_classes) The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py", + "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "deprecate_moved_module", + "source_code": "def deprecate_moved_module(deprecated_name, new_module, deletion_version):\n\n def getter(name):\n if getter not in _PRINTED_WARNING and _PRINT_DEPRECATION_WARNINGS:\n _PRINTED_WARNING[getter] = True\n _log_deprecation('Please fix your imports. Module %s has been moved to %s. The old module will be deleted in version %s.', deprecated_name, new_module.__name__, deletion_version)\n return getattr(new_module, name)\n return getter", + "docstring": "Logs a warning when a module that has been moved to a new location is used. Copy the following code into the old module: Args: deprecated_name: Name of old module. new_module: Module to replace the old module. deletion_version: Version of TensorFlow in which the old module will be removed. Returns: A function that logs a warning and returns the symbol from the new module. 
Set this function as the module's .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py", + "ast_data": "FunctionDef name:deprecate_moved_module arg:deprecated_name arg:new_module arg:deletion_version arguments arg arg arg FunctionDef name:getter arg:name arguments arg If BoolOp Compare Assign Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "register_custom_op_symbolic", + "source_code": "def register_custom_op_symbolic(symbolic_name: str, symbolic_fn: Callable, opset_version: int):\n if symbolic_name.startswith('::'):\n symbolic_name = f'aten{symbolic_name}'\n _verify_custom_op_name(symbolic_name)\n registration.custom_onnx_symbolic(symbolic_name, opset_version)(symbolic_fn)", + "docstring": "Registers a symbolic function for a custom operator. When the user registers symbolic for custom/contrib ops, it is highly recommended to add shape inference for that operator via setType API, otherwise the exported graph may have incorrect shape inference in some extreme cases. An example of setType is in . See \"Custom Operators\" in the module documentation for an example usage. Args: symbolic_name (str): The name of the custom operator in \"::\" format. symbolic_fn (Callable): A function that takes in the ONNX graph and the input arguments to the current operator, and returns new operator nodes to add to the graph. opset_version (int): The ONNX opset version in which to register.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\utils.py", + "ast_data": "FunctionDef name:register_custom_op_symbolic arg:symbolic_name arg:symbolic_fn arg:opset_version arguments arg arg arg If Call Assign Call Call Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, child):\n _api.check_isinstance(Transform, child=child)\n super().__init__()\n self.set(child)", + "docstring": "*child*: A instance. This child may later be replaced with :meth:.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:child arguments arg arg Call Call Call Call" + }, + { + "library": "pytorch", + "name": "prepare_model_with_stubs", + "source_code": "def prepare_model_with_stubs(float_module: nn.Module, q_module: nn.Module, module_swap_list: set[type], logger_cls: Callable) -> None:\n torch._C._log_api_usage_once('quantization_api._numeric_suite.prepare_model_with_stubs')\n float_module_children = dict(float_module.named_children())\n reassign = {}\n for name, mod in q_module.named_children():\n if name not in float_module_children:\n continue\n float_mod = float_module_children[name]\n if type(float_mod) not in module_swap_list:\n prepare_model_with_stubs(float_mod, mod, module_swap_list, logger_cls)\n if type(float_mod) in module_swap_list and (not _is_identical_module_type(mod, float_mod)):\n reassign[name] = Shadow(mod, float_mod, logger_cls)\n for key, value in reassign.items():\n q_module._modules[key] = value", + "docstring": "Prepare the model by attaching the float module to its matching quantized module as the shadow if the float module type is in module_swap_list. 
Example usage:: prepare_model_with_stubs(float_model, q_model, module_swap_list, Logger) q_model(data) ob_dict = get_logger_dict(q_model) Args: float_module: float module used to generate the q_module q_module: module quantized from float_module module_swap_list: list of float module types to attach the shadow logger_cls: type of logger to be used in shadow module to process the outputs of quantized module and its float shadow module", + "type": "function", + "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite.py", + "ast_data": "FunctionDef name:prepare_model_with_stubs arg:float_module arg:q_module arg:module_swap_list arg:logger_cls arguments arg arg arg arg Call Assign Call Call Assign For Call If Compare Assign If Compare Call Call If BoolOp Compare Call Call Assign Call For Call Assign" + }, + { + "library": "scipy", + "name": "sawtooth", + "source_code": "def sawtooth(t, width=1):\n t, w = (asarray(t), asarray(width))\n w = asarray(w + (t - t))\n t = asarray(t + (w - w))\n y = zeros(t.shape, dtype='d')\n mask1 = (w > 1) | (w < 0)\n place(y, mask1, nan)\n tmod = mod(t, 2 * pi)\n mask2 = 1 - mask1 & (tmod < w * 2 * pi)\n tsub = extract(mask2, tmod)\n wsub = extract(mask2, w)\n place(y, mask2, tsub / (pi * wsub) - 1)\n mask3 = 1 - mask1 & 1 - mask2\n tsub = extract(mask3, tmod)\n wsub = extract(mask3, w)\n place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))\n return y", + "docstring": "Return a periodic sawtooth or triangle waveform. The sawtooth waveform has a period `widthwidth` = 0.5 produces a triangle wave. If an array, causes wave shape to change over time, and must be the same length as t. Returns ------- y : ndarray Output array containing the sawtooth waveform. Examples -------- A 5 Hz waveform sampled at 500 Hz for 1 second: >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(0, 1, 500) >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_waveforms.py", + "ast_data": "FunctionDef name:sawtooth arg:t arg:width arguments arg arg Assign Call Call Assign Call Assign Call Assign Call Assign Compare Compare Call Assign Call Assign Compare Assign Call Assign Call Call Assign Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "he_normal", + "source_code": "@tf_export(v1=['initializers.he_normal'])\ndef he_normal(seed=None):\n return VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal', seed=seed)", + "docstring": "He normal initializer. It draws samples from a truncated normal distribution centered on 0 with standard deviation (after truncation) given by where is the number of input units in the weight tensor. Args: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: [He et al., 2015] ( # pylint: disable=line-too-long ([pdf](", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "FunctionDef name:he_normal arg:seed arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "record", + "source_code": "def record(self, stream=None) -> None:\n if stream is None:\n stream = torch.xpu.current_stream()\n super().record(stream)", + "docstring": "Record the event in a given stream. Uses `` if no stream is specified. 
The stream's device must match the event's device.", + "type": "method", + "file_path": "pytorch\\torch\\xpu\\streams.py", + "ast_data": "FunctionDef name:record arg:self arg:stream arguments arg arg If Compare Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "_get_quantize_handler_cls", + "source_code": "def _get_quantize_handler_cls(observation_type: ObservationType, dtype_configs: list[DTypeConfig], num_tensor_args_to_observation_type: dict[int, ObservationType]) -> type[QuantizeHandler]:\n\n class ConfigurableQuantizeHandler(QuantizeHandler):\n\n def __init__(self, node_pattern: NodePattern, modules: dict[str, torch.nn.Module], root_node_getter: Optional[Callable]=None):\n super().__init__(node_pattern, modules, root_node_getter)\n if num_tensor_args_to_observation_type:\n assert self.num_tensor_args in num_tensor_args_to_observation_type, f'Must provide observation_type config for tensor number {self.num_tensor_args} in num_tensor_args_to_observation_type for {node_pattern}'\n self.observation_type = num_tensor_args_to_observation_type[self.num_tensor_args]\n else:\n self.observation_type = observation_type\n self.dtype_configs = dtype_configs\n\n def is_general_tensor_value_op(self) -> bool:\n return self.observation_type == ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT\n return ConfigurableQuantizeHandler", + "docstring": "Return a configurable QuantizeHandler that matches the given specifications from the backend.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\quantize_handler.py", + "ast_data": "FunctionDef name:_get_quantize_handler_cls arg:observation_type arg:dtype_configs arg:num_tensor_args_to_observation_type arguments arg arg arg ClassDef name:ConfigurableQuantizeHandler FunctionDef name:__init__ arg:self arg:node_pattern arg:modules arg:root_node_getter arguments arg arg arg arg Call Call If Compare Assign Assign Assign FunctionDef name:is_general_tensor_value_op arg:self arguments arg Return return:yes Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "get", + "source_code": "def get(identifier):\n if identifier is None:\n return None\n if isinstance(identifier, str):\n identifier = str(identifier)\n return deserialize(identifier)\n if isinstance(identifier, dict):\n return deserialize(identifier)\n if callable(identifier):\n return identifier\n raise ValueError(f'Could not interpret loss function identifier: {identifier}')", + "docstring": "Retrieves a Keras loss as a / class instance. The may be the string name of a loss function or class. >>> loss = tf.keras.losses.get(\"categorical_crossentropy\") >>> type(loss) >>> loss = tf.keras.losses.get(\"CategoricalCrossentropy\") >>> type(loss) You can also specify of the loss to this function by passing dict containing and as an identifier. Also note that the must map to a class >>> identifier = {\"class_name\": \"CategoricalCrossentropy\", ... \"config\": {\"from_logits\": True}} >>> loss = tf.keras.losses.get(identifier) >>> type(loss) Args: identifier: A loss identifier. One of None or string name of a loss function/class or loss configuration dictionary or a loss function or a loss class instance. Returns: A Keras loss as a / class instance. 
Raises: ValueError: If cannot be interpreted.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:get arg:identifier arguments arg If Compare Return return:no If Call Assign Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Raise Call" + }, + { + "library": "scrapy", + "name": "from_settings", + "source_code": "@classmethod\ndef from_settings(cls, settings: BaseSettings) -> Self:\n pass", + "docstring": "Return an instance of the class for the given settings", + "type": "method", + "file_path": "scrapy\\scrapy\\spiderloader.py", + "ast_data": "FunctionDef name:from_settings arg:cls arg:settings arguments arg arg" + }, + { + "library": "tensorflow", + "name": "Symbol", + "source_code": "class Symbol(Tensor):\n pass", + "docstring": "Symbolic \"graph\" Tensor. These objects represent the output of an op definition and do not carry a value.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\types\\core.py", + "ast_data": "ClassDef name:Symbol" + }, + { + "library": "tensorflow", + "name": "cosine_proximity", + "source_code": "def cosine_proximity(y_true, y_pred, axis=-1):\n y_true = nn.l2_normalize(y_true, axis=axis)\n y_pred = nn.l2_normalize(y_pred, axis=axis)\n return math_ops.reduce_sum(y_true * y_pred, axis=axis)", + "docstring": "Computes the cosine similarity between labels and predictions. Args: y_true: The ground truth values. y_pred: The prediction values. axis: (Optional) Defaults to -1. The dimension along which the cosine similarity is computed. Returns: Cosine similarity value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "FunctionDef name:cosine_proximity arg:y_true arg:y_pred arg:axis arguments arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "write_sparse", + "source_code": "def write_sparse(self, arr, name):\n A = arr.tocoo()\n imagf = A.dtype.kind == 'c'\n ijv = np.zeros((A.nnz + 1, 3 + imagf), dtype='f8')\n ijv[:-1, 0] = A.row\n ijv[:-1, 1] = A.col\n ijv[:-1, 0:2] += 1\n if imagf:\n ijv[:-1, 2] = A.data.real\n ijv[:-1, 3] = A.data.imag\n else:\n ijv[:-1, 2] = A.data\n ijv[-1, 0:2] = A.shape\n self.write_header(name, ijv.shape, P=miDOUBLE, T=mxSPARSE_CLASS)\n self.write_bytes(ijv)", + "docstring": "Sparse matrices are 2-D See docstring for VarReader4.read_sparse_array", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py", + "ast_data": "FunctionDef name:write_sparse arg:self arg:arr arg:name arguments arg arg arg Assign Call Assign Compare Assign Call Assign Assign If Assign Assign Assign Assign Call Call" + }, + { + "library": "tensorflow", + "name": "get_output_at", + "source_code": "@doc_controls.do_not_doc_inheritable\ndef get_output_at(self, node_index):\n return self._get_node_attribute_at_index(node_index, 'output_tensors', 'output')", + "docstring": "Retrieves the output tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first output node of the layer. Returns: A tensor (or list of tensors if the layer has multiple outputs). 
Raises: RuntimeError: If called in Eager mode.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:get_output_at arg:self arg:node_index arguments arg arg Return return:yes Call" + }, + { + "library": "django", + "name": "get_relations", + "source_code": "def get_relations(self, cursor, table_name):\n table_name = table_name.upper()\n cursor.execute('\\n SELECT ca.column_name, cb.table_name, cb.column_name\\n FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb\\n WHERE user_constraints.table_name = %s AND\\n user_constraints.constraint_name = ca.constraint_name AND\\n user_constraints.r_constraint_name = cb.constraint_name AND\\n ca.position = cb.position', [table_name])\n return {self.identifier_converter(field_name): (self.identifier_converter(rel_field_name), self.identifier_converter(rel_table_name)) for field_name, rel_table_name, rel_field_name in cursor.fetchall()}", + "docstring": "Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all foreign keys in the given table.", + "type": "method", + "file_path": "django\\django\\db\\backends\\oracle\\introspection.py", + "ast_data": "FunctionDef name:get_relations arg:self arg:cursor arg:table_name arguments arg arg arg Assign Call Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_as_graph_element", + "source_code": "def _as_graph_element(obj):\n conv_fn = getattr(obj, '_as_graph_element', None)\n if conv_fn and callable(conv_fn):\n return conv_fn()\n return None", + "docstring": "Convert to a graph element if possible, otherwise return . Args: obj: Object to convert. Returns: The result of if that method is available; otherwise .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:_as_graph_element arg:obj arguments arg Assign Call If BoolOp Call Return return:yes Call Return return:no" + }, + { + "library": "pytorch", + "name": "_is_non_negative_check", + "source_code": "def _is_non_negative_check(cond: sympy.Basic) -> Optional[str]:\n if isinstance(cond, sympy.Rel):\n if cond.rel_op == '>=' and cond.rhs == 0:\n return str(cond.lhs)\n return None", + "docstring": "Check if a condition (SymPy expression) is checking for non-negative values (>= 0). Returns the variable name if it's a non-negative check (>= 0), None otherwise.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:_is_non_negative_check arg:cond arguments arg If Call If BoolOp Compare Compare Return return:yes Call Return return:no" + }, + { + "library": "pandas", + "name": "_get_value", + "source_code": "def _get_value(self, index, col, takeable: bool=False) -> Scalar:\n if takeable:\n series = self._ixs(col, axis=1)\n return series._values[index]\n series = self._get_item(col)\n if not isinstance(self.index, MultiIndex):\n row = self.index.get_loc(index)\n return series._values[row]\n loc = self.index._engine.get_loc(index)\n return series._values[loc]", + "docstring": "Quickly retrieve single value at passed column and index. 
Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both and ; Caller is responsible for checking.", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:_get_value arg:self arg:index arg:col arg:takeable arguments arg arg arg arg If Assign Call Return return:yes Assign Call If Call Assign Call Return return:yes Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, name, shape, dtype, variable_list, partitions):\n if not isinstance(variable_list, (list, tuple)):\n raise TypeError('variable_list is not a list or tuple: %s' % variable_list)\n if not isinstance(partitions, (list, tuple)):\n raise TypeError('partitions is not a list or tuple: %s' % partitions)\n if not all((p >= 1 for p in partitions)):\n raise ValueError('partition values must be positive: %s' % partitions)\n if not variable_list:\n raise ValueError('variable_list may not be empty')\n for v in variable_list:\n if not all((v._get_save_slice_info() is not None for v in variable_list)):\n raise ValueError('All variables must have a save_slice_info available: %s' % [v.name for v in variable_list])\n if len(shape) != len(partitions):\n raise ValueError('len(shape) != len(partitions): %s vs. %s' % (shape, partitions))\n if v._get_save_slice_info().full_shape != shape:\n raise ValueError(\"All variables' full shapes must match shape: %s; but full shapes were: %s\" % (shape, str([v._get_save_slice_info().full_shape])))\n self._variable_list = sorted(variable_list, key=lambda v: v._get_save_slice_info().var_offset)\n self._name = name\n self._shape = shape\n self._dtype = dtype\n self._partitions = partitions\n self._as_tensor = None", + "docstring": "Creates a new partitioned variable wrapper. Variables passed via the variable_list must contain a save_slice_info field. Concatenation and iteration is in lexicographic order according to the var_offset property of the save_slice_info. Args: name: String. Overall name of the variables. shape: List of integers. Overall shape of the variables. dtype: Type of the variables. variable_list: List of that comprise this partitioned variable. partitions: List of integers. Number of partitions for each dimension. Raises: TypeError: If is not a list of objects, or is not a list. ValueError: If is empty, or the shape information does not match , or has invalid values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:shape arg:dtype arg:variable_list arg:partitions arguments arg arg arg arg arg arg If Call Raise Call If Call Raise Call If Call Compare Raise Call If Raise Call For If Call Compare Call Raise Call If Compare Call Call Raise Call If Compare Call Raise Call Call Call Assign Call arguments arg Call Assign Assign Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "_cmake_cache_file", + "source_code": "@property\ndef _cmake_cache_file(self) -> str:\n return os.path.join(self.build_dir, 'CMakeCache.txt')", + "docstring": "Returns the path to CMakeCache.txt. 
Returns: string: The path to CMakeCache.txt.", + "type": "method", + "file_path": "pytorch\\tools\\setup_helpers\\cmake.py", + "ast_data": "FunctionDef name:_cmake_cache_file arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "synchronize", + "source_code": "def synchronize(self) -> None:\n super().synchronize()", + "docstring": "Wait for all the kernels in this stream to complete.", + "type": "method", + "file_path": "pytorch\\torch\\xpu\\streams.py", + "ast_data": "FunctionDef name:synchronize arg:self arguments arg Call Call" + }, + { + "library": "numpy", + "name": "as_integer", + "source_code": "def as_integer(obj, kind=4):\n if isinstance(obj, int):\n return Expr(Op.INTEGER, (obj, kind))\n if isinstance(obj, Expr):\n if obj.op is Op.INTEGER:\n return obj\n raise OpError(f'cannot convert {obj} to INTEGER constant')", + "docstring": "Return object as INTEGER constant.", + "type": "function", + "file_path": "numpy\\numpy\\f2py\\symbolic.py", + "ast_data": "FunctionDef name:as_integer arg:obj arg:kind arguments arg arg If Call Return return:yes Call If Call If Compare Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "random_shuffle", + "source_code": "@dispatch.dispatch_for_types(random_ops.random_shuffle, StructuredTensor)\ndef random_shuffle(value, seed=None, name=None):\n with ops.name_scope(name, 'shuffle', [value, seed]):\n if value.rank == 0:\n raise ValueError('Cannot shuffle a scalar StructuredTensor')\n first_dimension = value.nrows()\n index = random_ops.random_shuffle(math_ops.range(first_dimension), seed=seed)\n return gather(value, index, axis=0)", + "docstring": "Shuffle a structured tensor on the zeroth axis. Args: value: a structured tensor of rank at least one. seed: the seed for shuffling. name: the name for shuffle. Returns: The shuffled structured tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", + "ast_data": "FunctionDef name:random_shuffle arg:value arg:seed arg:name arguments arg arg arg With Call If Compare Raise Call Assign Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "result", + "source_code": "@abc.abstractmethod\ndef result(self):\n raise NotImplementedError('Must be implemented in subclasses.')", + "docstring": "Computes and returns the metric value tensor. Result computation is an idempotent operation that simply calculates the metric value using the state variables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", + "ast_data": "FunctionDef name:result arg:self arguments arg Raise Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, dump):\n self._dump = dump\n self._cached_tensor_values = {}", + "docstring": "Constructor of ExpressionEvaluator. 
Args: dump: an instance of .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\evaluator.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:dump arguments arg arg Assign Assign" + }, + { + "library": "tensorflow", + "name": "get_next", + "source_code": "def get_next(self, device=None):\n if device is not None:\n index = self._devices.index(device)\n return self._device_iterators[index].get_next()\n result = []\n for i, device in enumerate(self._devices):\n with ops.device(device):\n result.append(self._device_iterators[i].get_next())\n return result", + "docstring": "Returns the next element given a , else returns all in a list.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\multi_device_iterator_ops.py", + "ast_data": "FunctionDef name:get_next arg:self arg:device arguments arg arg If Compare Assign Call Return return:yes Call Assign For Call With Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "names", + "source_code": "@property\ndef names(self):\n if is_term(self.terms):\n return frozenset([self.terms.name])\n return frozenset((term.name for term in com.flatten(self.terms)))", + "docstring": "Get the names in an expression.", + "type": "method", + "file_path": "pandas\\pandas\\core\\computation\\expr.py", + "ast_data": "FunctionDef name:names arg:self arguments arg If Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "inverse_transform", + "source_code": "def inverse_transform(self, X, copy=True):\n check_is_fitted(self)\n X = check_array(X, copy=copy and self.whiten, dtype=[np.float64, np.float32])\n X = np.dot(X, self.mixing_.T)\n if self.whiten:\n X += self.mean_\n return X", + "docstring": "Transform the sources back to the mixed data (apply mixing matrix). Parameters ---------- X : array-like of shape (n_samples, n_components) Sources, where is the number of samples and is the number of components. copy : bool, default=True If False, data passed to fit are overwritten. Defaults to True. Returns ------- X_original : ndarray of shape (n_samples, n_features) Reconstructed data obtained with the mixing matrix.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_fastica.py", + "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arg:copy arguments arg arg arg Call Assign Call BoolOp Assign Call If Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_versions", + "source_code": "def get_versions(versions_file):\n with open('versions.txt', 'r') as f:\n return dict((line.strip().split('=') for line in f))", + "docstring": "Get the versions of the packages used in the linter job. Parameters ---------- versions_file : str The path to the file that contains the versions of the packages. 
Returns ------- versions : dict A dictionary with the versions of the packages.", + "type": "function", + "file_path": "scikit-learn\\build_tools\\get_comment.py", + "ast_data": "FunctionDef name:get_versions arg:versions_file arguments arg With Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "_make_block_mask", + "source_code": "def _make_block_mask(self, data, sparse_block_shape, zeros_per_block, mask=None):\n h, w = data.shape[-2:]\n block_h, block_w = sparse_block_shape\n dh = (block_h - h % block_h) % block_h\n dw = (block_w - w % block_w) % block_w\n values_per_block = reduce(operator.mul, sparse_block_shape)\n if mask is None:\n mask = torch.ones((h + dh, w + dw), device=data.device)\n if values_per_block == zeros_per_block:\n mask.data = torch.zeros_like(mask)\n return mask\n padded_data = torch.ones(h + dh, w + dw, dtype=data.dtype, device=data.device)\n padded_data.fill_(torch.nan)\n padded_data[:h, :w] = data\n unfolded_data = F.unfold(padded_data[None, None, :], kernel_size=sparse_block_shape, stride=sparse_block_shape)\n mask_reshape = mask.reshape(unfolded_data.shape)\n _, sorted_idx = torch.topk(unfolded_data, k=zeros_per_block, dim=1, largest=False)\n self._scatter_fold_block_mask(dim=1, indices=sorted_idx, output_shape=padded_data.shape, block_shape=sparse_block_shape, mask=mask_reshape)\n mask.data = mask_reshape.squeeze().reshape(mask.shape).contiguous()\n return mask", + "docstring": "Creates a block-level mask. Block-level mask is described as a mask, where the granularity of sparsification of the largest patch is the sparse_block_shape. That means that for a given mask and a sparse_block_shape, the sparsity is computed only within a patch of a size sparse_block_shape. In this context the describes the number of zeroed-out elements within a patch.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\weight_norm_sparsifier.py", + "ast_data": "FunctionDef name:_make_block_mask arg:self arg:data arg:sparse_block_shape arg:zeros_per_block arg:mask arguments arg arg arg arg arg Assign Assign Assign Assign Assign Call If Compare Assign Call If Compare Assign Call Return return:yes Assign Call Call Assign Assign Call Assign Call Assign Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "HalfPoissonLoss", + "source_code": "class HalfPoissonLoss(BaseLoss):\n\n def __init__(self, sample_weight=None):\n super().__init__(closs=CyHalfPoissonLoss(), link=LogLink())\n self.interval_y_true = Interval(0, np.inf, True, False)\n\n def constant_to_optimal_zero(self, y_true, sample_weight=None):\n term = xlogy(y_true, y_true) - y_true\n if sample_weight is not None:\n term *= sample_weight\n return term", + "docstring": "Half Poisson deviance loss with log-link, for regression. Domain: y_true in non-negative real numbers y_pred in positive real numbers Link: y_pred = exp(raw_prediction) For a given sample x_i, half the Poisson deviance is defined as:: loss(x_i) = y_true_i * log(y_true_i/exp(raw_prediction_i)) - y_true_i + exp(raw_prediction_i) Half the Poisson deviance is actually the negative log-likelihood up to constant terms (not involving raw_prediction) and simplifies the computation of the gradients. 
We also skip the constant term .", + "type": "class", + "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", + "ast_data": "ClassDef name:HalfPoissonLoss FunctionDef name:__init__ arg:self arg:sample_weight arguments arg arg Call Call Call Call Assign Call FunctionDef name:constant_to_optimal_zero arg:self arg:y_true arg:sample_weight arguments arg arg arg Assign Call If Compare Return return:yes" + }, + { + "library": "django", + "name": "set_name_with_model", + "source_code": "def set_name_with_model(self, model):\n _, table_name = split_identifier(model._meta.db_table)\n column_names = [model._meta.get_field(field_name).column for field_name, order in self.fields_orders]\n column_names_with_order = [('-%s' if order else '%s') % column_name for column_name, (field_name, order) in zip(column_names, self.fields_orders)]\n hash_data = [table_name, *column_names_with_order, self.suffix]\n self.name = '%s_%s_%s' % (table_name[:11], column_names[0][:7], '%s_%s' % (names_digest(*hash_data, length=6), self.suffix))\n if len(self.name) > self.max_name_length:\n raise ValueError('Index too long for multiple database support. Is self.suffix longer than 3 characters?')\n if self.name[0] == '_' or self.name[0].isdigit():\n self.name = 'D%s' % self.name[1:]", + "docstring": "Generate a unique name for the index. The name is divided into 3 parts - table name (12 chars), field name (8 chars) and unique hash + suffix (10 chars). Each part is made to fit its size by truncating the excess length.", + "type": "method", + "file_path": "django\\django\\db\\models\\indexes.py", + "ast_data": "FunctionDef name:set_name_with_model arg:self arg:model arguments arg arg Assign Call Assign Call Assign Call Assign Assign Call If Compare Call Raise Call If BoolOp Compare Call Assign" + }, + { + "library": "seaborn", + "name": "DisplayConfig", + "source_code": "class DisplayConfig(TypedDict):\n format: Literal['png', 'svg']\n scaling: float\n hidpi: bool", + "docstring": "Configuration for IPython's rich display hooks.", + "type": "class", + "file_path": "seaborn\\seaborn\\_core\\plot.py", + "ast_data": "ClassDef name:DisplayConfig" + }, + { + "library": "pytorch", + "name": "LazyConv1d", + "source_code": "class LazyConv1d(_LazyConvXdMixin, Conv1d):\n cls_to_become = Conv1d\n\n def __init__(self, out_channels: int, kernel_size: _size_1_t, stride: _size_1_t=1, padding: _size_1_t=0, dilation: _size_1_t=1, groups: int=1, bias: bool=True, padding_mode: str='zeros', device=None, dtype=None) -> None:\n factory_kwargs = {'device': device, 'dtype': dtype}\n super().__init__(0, 0, kernel_size, stride, padding, dilation, groups, False, padding_mode, **factory_kwargs)\n self.weight = UninitializedParameter(**factory_kwargs)\n self.out_channels = out_channels\n if bias:\n self.bias = UninitializedParameter(**factory_kwargs)\n\n def _get_num_spatial_dims(self) -> int:\n return 1", + "docstring": "A :class: module with lazy initialization of the `Conv1dweightbiastorch.nn.modules.lazy.LazyModuleMixintorch.nn.Conv1dtorch.nn.modules.lazy.LazyModuleMixin`", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\conv.py", + "ast_data": "ClassDef name:LazyConv1d Assign FunctionDef name:__init__ arg:self arg:out_channels arg:kernel_size arg:stride arg:padding arg:dilation arg:groups arg:bias arg:padding_mode arg:device arg:dtype arguments arg arg arg arg arg arg arg arg arg arg arg Assign Call Call Assign Call Assign If Assign Call FunctionDef name:_get_num_spatial_dims arg:self arguments arg Return return:yes" + }, + { + 
"library": "matplotlib", + "name": "set_thetamax", + "source_code": "def set_thetamax(self, thetamax):\n self.viewLim.x1 = np.deg2rad(thetamax)", + "docstring": "Set the maximum theta limit in degrees.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py", + "ast_data": "FunctionDef name:set_thetamax arg:self arg:thetamax arguments arg arg Assign Call" + }, + { + "library": "pytorch", + "name": "Softplus", + "source_code": "class Softplus(Module):\n __constants__ = ['beta', 'threshold']\n beta: float\n threshold: float\n\n def __init__(self, beta: float=1.0, threshold: float=20.0) -> None:\n super().__init__()\n self.beta = beta\n self.threshold = threshold\n\n def forward(self, input: Tensor) -> Tensor:\n return F.softplus(input, self.beta, self.threshold)\n\n def extra_repr(self) -> str:\n return f'beta={self.beta}, threshold={self.threshold}'", + "docstring": "Applies the Softplus function element-wise. .. math:: \\text{Softplus}(x) = \\frac{1}{\\beta} * \\log(1 + \\exp(\\beta * x)) SoftPlus is a smooth approximation to the ReLU function and can be used to constrain the output of a machine to always be positive. For numerical stability the implementation reverts to the linear function when :math:. Args: beta: the :math: value for the Softplus formulation. Default: 1 threshold: values above this revert to a linear function. Default: 20 Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. image:: ../scripts/activation_images/Softplus.png Examples:: >>> m = nn.Softplus() >>> input = torch.randn(2) >>> output = m(input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\activation.py", + "ast_data": "ClassDef name:Softplus Assign FunctionDef name:__init__ arg:self arg:beta arg:threshold arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.interpd\ndef __init__(self, x, y, dx, dy, *, width=0.001, length_includes_head=False, head_width=None, head_length=None, shape='full', overhang=0, head_starts_at_zero=False, **kwargs):\n self._x = x\n self._y = y\n self._dx = dx\n self._dy = dy\n self._width = width\n self._length_includes_head = length_includes_head\n self._head_width = head_width\n self._head_length = head_length\n self._shape = shape\n self._overhang = overhang\n self._head_starts_at_zero = head_starts_at_zero\n self._make_verts()\n super().__init__(self.verts, closed=True, **kwargs)", + "docstring": "Parameters ---------- x, y : float The x and y coordinates of the arrow base. dx, dy : float The length of the arrow along x and y direction. width : float, default: 0.001 Width of full arrow tail. length_includes_head : bool, default: False True if head is to be counted in calculating the length. head_width : float or None, default: 3*width Total width of the full arrow head. head_length : float or None, default: 1.5*head_width Length of arrow head. shape : {'full', 'left', 'right'}, default: 'full' Draw the left-half, right-half, or full arrow. overhang : float, default: 0 Fraction that the arrow is swept back (0 overhang means triangular shape). Can be negative or greater than one. head_starts_at_zero : bool, default: False If True, the head starts being drawn at coordinate 0 instead of ending at coordinate 0. 
**kwargs properties: %(Patch:kwdoc)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:x arg:y arg:dx arg:dy arguments arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Call Call" + }, + { + "library": "matplotlib", + "name": "render_glyph", + "source_code": "def render_glyph(self, output: Output, ox: float, oy: float, font: str, font_class: str, sym: str, fontsize: float, dpi: float) -> None:\n info = self._get_info(font, font_class, sym, fontsize, dpi)\n output.glyphs.append((ox, oy, info))", + "docstring": "At position (*ox*, *oy*), draw the glyph specified by the remaining parameters (see for their detailed description).", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py", + "ast_data": "FunctionDef name:render_glyph arg:self arg:output arg:ox arg:oy arg:font arg:font_class arg:sym arg:fontsize arg:dpi arguments arg arg arg arg arg arg arg arg arg Assign Call Call" + }, + { + "library": "cryptography", + "name": "public_key", + "source_code": "@abc.abstractmethod\ndef public_key(self) -> X448PublicKey:\n pass", + "docstring": "Returns the public key associated with this private key", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x448.py", + "ast_data": "FunctionDef name:public_key arg:self arguments arg" + }, + { + "library": "scipy", + "name": "arg_casts", + "source_code": "def arg_casts(argtype):\n if argtype in NPY_TYPES.values():\n return f'<{argtype}*>'\n return ''", + "docstring": "Cast from Cython to Numpy complex pointer types.", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\_generate_pyx.py", + "ast_data": "FunctionDef name:arg_casts arg:argtype arguments arg If Compare Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "locator_params", + "source_code": "def locator_params(self, axis='both', tight=None, **kwargs):\n _api.check_in_list([*self._axis_names, 'both'], axis=axis)\n for name in self._axis_names:\n if axis in [name, 'both']:\n loc = self._axis_map[name].get_major_locator()\n loc.set_params(**kwargs)\n self._request_autoscale_view(name, tight=tight)\n self.stale = True", + "docstring": "Control behavior of major tick locators. Because the locator is involved in autoscaling, is called automatically after the parameters are changed. Parameters ---------- axis : {'both', 'x', 'y'}, default: 'both' The axis on which to operate. (For 3D Axes, *axis* can also be set to 'z', and 'both' refers to all three axes.) tight : bool or None, optional Parameter passed to . Default is None, for no change. Other Parameters ---------------- **kwargs Remaining keyword arguments are passed to directly to the `~.ticker.MaxNLocator.set_params.ticker.MaxNLocator` used by default for linear. 
Examples -------- When plotting small subplots, one might want to reduce the maximum number of ticks and use tight bounds, for example:: ax.locator_params(tight=True, nbins=4)", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:locator_params arg:self arg:axis arg:tight arguments arg arg arg arg Call For If Compare Assign Call Call Call Assign" + }, + { + "library": "pytorch", + "name": "variance", + "source_code": "@lazy_property\ndef variance(self) -> Tensor:\n return 1 - (_log_modified_bessel_fn(self.concentration, order=1) - _log_modified_bessel_fn(self.concentration, order=0)).exp()", + "docstring": "The provided variance is the circular one.", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\von_mises.py", + "ast_data": "FunctionDef name:variance arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "matplotlib", + "name": "text_2d_to_3d", + "source_code": "def text_2d_to_3d(obj, z=0, zdir='z', axlim_clip=False):\n obj.__class__ = Text3D\n obj.set_3d_properties(z, zdir, axlim_clip)", + "docstring": "Convert a to a object. Parameters ---------- z : float The z-position in 3D space. zdir : {'x', 'y', 'z', 3-tuple} The direction of the text. Default: 'z'. See for a description of the values. axlim_clip : bool, default: False Whether to hide text outside the axes view limits. .. versionadded:: 3.10", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", + "ast_data": "FunctionDef name:text_2d_to_3d arg:obj arg:z arg:zdir arg:axlim_clip arguments arg arg arg arg Assign Call" + }, + { + "library": "kornia", + "name": "rgb_to_rgb255", + "source_code": "def rgb_to_rgb255(image: Tensor) -> Tensor:\n KORNIA_CHECK_IS_COLOR(image)\n rgb255 = (image * 255).clip(0.0, 255.0)\n return rgb255", + "docstring": "Convert an image from RGB to RGB [0, 255] for visualization purposes. Args: image: RGB Image to be converted to RGB [0, 255] of shape :math:. Returns: RGB version of the image with shape of shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_rgb255(input) # 2x3x4x5", + "type": "function", + "file_path": "kornia\\kornia\\color\\rgb.py", + "ast_data": "FunctionDef name:rgb_to_rgb255 arg:image arguments arg Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_verify_placeholder_names", + "source_code": "def _verify_placeholder_names(gm: torch.fx.GraphModule, sig: ExportGraphSignature) -> None:\n name_to_kind = {spec.arg.name: spec.kind for spec in sig.input_specs}\n for mod in gm.modules():\n if not isinstance(mod, torch.fx.GraphModule):\n continue\n for node in mod.graph.nodes:\n if node.op == 'placeholder':\n if node.name not in name_to_kind:\n continue\n node_kind = name_to_kind[node.name]\n prefix = placeholder_prefixes[node_kind]\n if not node.name.startswith(prefix):\n raise SpecViolationError(f'Placeholder node name {node.name} does not follow spec for {node_kind}, name should have prefix: {prefix}')", + "docstring": "Performs a sanity check on the placeholder node names. 
- User input nodes: no restrictions, should match the original forward() signature - Params/buffers/constants/custom_obj/token nodes: should start with prefixes defined in", + "type": "function", + "file_path": "pytorch\\torch\\export\\_trace.py", + "ast_data": "FunctionDef name:_verify_placeholder_names arg:gm arg:sig arguments arg arg Assign For Call If Call For If Compare If Compare Assign Assign If Call Raise Call" + }, + { + "library": "tensorflow", + "name": "composite_tensor_to_variants", + "source_code": "def composite_tensor_to_variants(value, type_spec=None, name=None):\n if not isinstance(value, composite_tensor.CompositeTensor):\n raise TypeError(f'Expected `value` to be a CompositeTensor. Received {type(value)}.')\n if type_spec is None:\n type_spec = value._type_spec\n if not type_spec.is_compatible_with(value):\n raise ValueError(f'`type_spec` {type_spec} is not compatible with `value` {value!r}.')\n metadata = composite_tensor_variant_pb2.CompositeTensorVariantMetadata()\n metadata.type_spec_proto.CopyFrom(nested_structure_coder.encode_structure(type_spec).type_spec_value)\n return gen_composite_tensor_ops.CompositeTensorVariantFromComponents(components=nest.flatten(value, expand_composites=True), metadata=metadata.SerializeToString(), name=name)", + "docstring": "Encodes as a scalar variant tensor. Args: value: The value to encode. type_spec: Information about the value's type that should be included in the encoding. name: Optional name for the operation. Returns: A Tensor with shape= and dtype=. Raises: ValueError: If is not compatible with .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\composite_tensor_ops.py", + "ast_data": "FunctionDef name:composite_tensor_to_variants arg:value arg:type_spec arg:name arguments arg arg arg If Call Raise Call Call If Compare Assign If Call Raise Call Assign Call Call Call Return return:yes Call Call Call" + }, + { + "library": "sphinx", + "name": "get_options", + "source_code": "def get_options(self, overrides: dict[str, Any] | None=None) -> dict[str, Any]:\n if overrides is None:\n overrides = {}\n options = self._options.copy()\n for option, value in overrides.items():\n if option not in options:\n logger.warning(__('unsupported theme option %r given'), option)\n else:\n options[option] = value\n return options", + "docstring": "Return a dictionary of theme options and their values.", + "type": "method", + "file_path": "sphinx\\sphinx\\theming.py", + "ast_data": "FunctionDef name:get_options arg:self arg:overrides arguments arg arg If Compare Assign Assign Call For Call If Compare Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "get_release_version", + "source_code": "def get_release_version(onto_branch: str) -> Optional[str]:\n m = re.match(RELEASE_BRANCH_REGEX, onto_branch)\n return m.group('version') if m else ''", + "docstring": "Return the release version if the target branch is a release branch", + "type": "function", + "file_path": "pytorch\\.github\\scripts\\cherry_pick.py", + "ast_data": "FunctionDef name:get_release_version arg:onto_branch arguments arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "max_unpool1d", + "source_code": "def max_unpool1d(input: Tensor, indices: Tensor, kernel_size: BroadcastingList1[int], stride: Optional[BroadcastingList1[int]]=None, padding: BroadcastingList1[int]=0, output_size: Optional[BroadcastingList1[int]]=None) -> Tensor:\n if has_torch_function_unary(input):\n return 
handle_torch_function(max_unpool1d, (input,), input, indices, kernel_size, stride=stride, padding=padding, output_size=output_size)\n kernel_size = _single(kernel_size)\n if stride is not None:\n _stride = _single(stride)\n else:\n _stride = kernel_size\n padding = _single(padding)\n output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)\n if isinstance(output_size, list):\n output_size = output_size + [1]\n else:\n output_size = output_size + (1,)\n return torch._C._nn.max_unpool2d(input.unsqueeze(-1), indices.unsqueeze(-1), output_size).squeeze(-1)", + "docstring": "Compute a partial inverse of :class:. See :class: for details.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:max_unpool1d arg:input arg:indices arg:kernel_size arg:stride arg:padding arg:output_size arguments arg arg arg arg arg arg If Call Return return:yes Call Assign Call If Compare Assign Call Assign Assign Call Assign Call If Call Assign Assign Return return:yes Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "_extend_lower", + "source_code": "def _extend_lower(self):\n minmax = 'max' if self.long_axis.get_inverted() else 'min'\n return self.extend in ('both', minmax)", + "docstring": "Return whether the lower limit is open ended.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py", + "ast_data": "FunctionDef name:_extend_lower arg:self arguments arg Assign Call Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "DispatchKeysetRepr", + "source_code": "class DispatchKeysetRepr(gdb.Command):\n\n def __init__(self) -> None:\n gdb.Command.__init__(self, 'torch-dispatch-keyset-repr', gdb.COMMAND_USER, gdb.COMPLETE_EXPRESSION)\n\n def invoke(self, args: str, from_tty: bool) -> None:\n args = gdb.string_to_argv(args)\n if len(args) != 1:\n print('Usage: torch-dispatch-keyset-repr EXP')\n return\n keyset = args[0]\n with DisableBreakpoints():\n res = gdb.parse_and_eval(f'torch::gdb::dispatch_keyset_string({keyset})')\n res = str(res)\n print(res[res.find('\"') + 1:-1])", + "docstring": "Print human readable representation of c10::DispatchKeyset", + "type": "class", + "file_path": "pytorch\\tools\\gdb\\pytorch-gdb.py", + "ast_data": "ClassDef name:DispatchKeysetRepr FunctionDef name:__init__ arg:self arguments arg Call FunctionDef name:invoke arg:self arg:args arg:from_tty arguments arg arg arg Assign Call If Compare Call Call Return return:no Assign With Call Assign Call Assign Call Call Call" + }, + { + "library": "tensorflow", + "name": "should_stop", + "source_code": "def should_stop(self):\n return self._coord.should_stop()", + "docstring": "Check if the coordinator was told to stop. See . 
Returns: True if the coordinator was told to stop, False otherwise.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", + "ast_data": "FunctionDef name:should_stop arg:self arguments arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "window_partition", + "source_code": "def window_partition(x: Tensor, window_size: int) -> tuple[Tensor, tuple[int, int]]:\n B, H, W, C = x.shape\n pad_h = (window_size - H % window_size) % window_size\n pad_w = (window_size - W % window_size) % window_size\n if pad_h > 0 or pad_w > 0:\n x = pad(x, (0, 0, 0, pad_w, 0, pad_h))\n Hp, Wp = (H + pad_h, W + pad_w)\n x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n return (windows, (Hp, Wp))", + "docstring": "Partition into non-overlapping windows with padding if needed. Args: x: input tokens with [B, H, W, C]. window_size: window size. Returns: windows: windows after partition with [B * num_windows, window_size, window_size, C]. (Hp, Wp): padded height and width before partition", + "type": "function", + "file_path": "kornia\\kornia\\contrib\\models\\common.py", + "ast_data": "FunctionDef name:window_partition arg:x arg:window_size arguments arg arg Assign Assign Assign If BoolOp Compare Compare Assign Call Assign Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "swappable_dependency", + "source_code": "def swappable_dependency(value):\n return SwappableTuple((value.split('.', 1)[0], '__first__'), value)", + "docstring": "Turn a setting value into a dependency.", + "type": "function", + "file_path": "django\\django\\db\\migrations\\migration.py", + "ast_data": "FunctionDef name:swappable_dependency arg:value arguments arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "normal_equation_projections", + "source_code": "def normal_equation_projections(A, m, n, orth_tol, max_refin, tol):\n with catch_warnings(action='ignore', category=CholmodTypeConversionWarning):\n factor = cholesky_AAt(A)\n\n def null_space(x):\n v = factor(A.dot(x))\n z = x - A.T.dot(v)\n k = 0\n while orthogonality(A, z) > orth_tol:\n if k >= max_refin:\n break\n v = factor(A.dot(z))\n z = z - A.T.dot(v)\n k += 1\n return z\n\n def least_squares(x):\n return factor(A.dot(x))\n\n def row_space(x):\n return A.T.dot(factor(x))\n return (null_space, least_squares, row_space)", + "docstring": "Return linear operators for matrix A using `` approach.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\projections.py", + "ast_data": "FunctionDef name:normal_equation_projections arg:A arg:m arg:n arg:orth_tol arg:max_refin arg:tol arguments arg arg arg arg arg arg With Call Assign Call FunctionDef name:null_space arg:x arguments arg Assign Call Call Assign Call Assign While Compare Call If Compare Assign Call Call Assign Call Return return:yes FunctionDef name:least_squares arg:x arguments arg Return return:yes Call Call FunctionDef name:row_space arg:x arguments arg Return return:yes Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "value_type", + "source_code": "@property\ndef value_type(self):\n return Tensor", + "docstring": "The Python type for values that are compatible with this TypeSpec.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", + "ast_data": "FunctionDef name:value_type arg:self arguments 
arg Return return:yes" + }, + { + "library": "cherrypy", + "name": "check_compatibility", + "source_code": "def check_compatibility(self):\n self._compat(cherrypy.config)\n for sn, app in cherrypy.tree.apps.items():\n if not isinstance(app, cherrypy.Application):\n continue\n self._compat(app.config)", + "docstring": "Process config and warn on each obsolete or deprecated entry.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpchecker.py", + "ast_data": "FunctionDef name:check_compatibility arg:self arguments arg Call For Call If Call Call" + }, + { + "library": "sphinx", + "name": "write_doctree", + "source_code": "@final\ndef write_doctree(self, docname: str, doctree: nodes.document, *, _cache: bool=True) -> None:\n doctree.reporter = None\n doctree.transformer = None\n doctree.settings = doctree.settings.copy()\n doctree.settings.warning_stream = None\n doctree.settings.env = None\n doctree.settings.record_dependencies = None\n doctree_filename = self.doctreedir / f'{docname}.doctree'\n doctree_filename.parent.mkdir(parents=True, exist_ok=True)\n with open(doctree_filename, 'wb') as f:\n pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)\n if _cache:\n self.env._write_doc_doctree_cache[docname] = doctree", + "docstring": "Write the doctree to a file, to be used as a cache by re-builds.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\__init__.py", + "ast_data": "FunctionDef name:write_doctree arg:self arg:docname arg:doctree arguments arg arg arg arg Assign Assign Assign Call Assign Assign Assign Assign Call With Call Call If Assign" + }, + { + "library": "pandas", + "name": "__from_arrow__", + "source_code": "def __from_arrow__(self, array: pyarrow.Array | pyarrow.ChunkedArray) -> BaseMaskedArray:\n import pyarrow\n from pandas.core.arrays.arrow._arrow_utils import pyarrow_array_to_numpy_and_mask\n array_class = self.construct_array_type()\n pyarrow_type = pyarrow.from_numpy_dtype(self.type)\n if not array.type.equals(pyarrow_type) and (not pyarrow.types.is_null(array.type)):\n rt_dtype = pandas_dtype(array.type.to_pandas_dtype())\n if rt_dtype.kind not in 'iuf':\n raise TypeError(f'Expected array of {self} type, got {array.type} instead')\n array = array.cast(pyarrow_type)\n if isinstance(array, pyarrow.ChunkedArray):\n if array.num_chunks == 0:\n array = pyarrow.array([], type=array.type)\n else:\n array = array.combine_chunks()\n data, mask = pyarrow_array_to_numpy_and_mask(array, dtype=self.numpy_dtype)\n return array_class(data.copy(), ~mask, copy=False)", + "docstring": "Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\numeric.py", + "ast_data": "FunctionDef name:__from_arrow__ arg:self arg:array arguments arg arg Assign Call Assign Call If BoolOp Call Call Assign Call Call If Compare Raise Call Assign Call If Call If Compare Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "method_decorator", + "source_code": "def method_decorator(decorator, name=''):\n\n def _dec(obj):\n if not isinstance(obj, type):\n return _multi_decorate(decorator, obj)\n if not (name and hasattr(obj, name)):\n raise ValueError(\"The keyword argument `name` must be the name of a method of the decorated class: %s. 
Got '%s' instead.\" % (obj, name))\n method = getattr(obj, name)\n if not callable(method):\n raise TypeError(\"Cannot decorate '%s' as it isn't a callable attribute of %s (%s).\" % (name, obj, method))\n _wrapper = _multi_decorate(decorator, method)\n setattr(obj, name, _wrapper)\n return obj\n if not hasattr(decorator, '__iter__'):\n update_wrapper(_dec, decorator)\n obj = decorator if hasattr(decorator, '__name__') else decorator.__class__\n _dec.__name__ = 'method_decorator(%s)' % obj.__name__\n return _dec", + "docstring": "Convert a function decorator into a method decorator", + "type": "function", + "file_path": "django\\django\\utils\\decorators.py", + "ast_data": "FunctionDef name:method_decorator arg:decorator arg:name arguments arg arg FunctionDef name:_dec arg:obj arguments arg If Call Return return:yes Call If BoolOp Call Raise Call Assign Call If Call Raise Call Assign Call Call Return return:yes If Call Call Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "add_scalar", + "source_code": "def add_scalar(self, tag, scalar_value, global_step=None, walltime=None, new_style=False, double_precision=False):\n torch._C._log_api_usage_once('tensorboard.logging.add_scalar')\n summary = scalar(tag, scalar_value, new_style=new_style, double_precision=double_precision)\n self._get_file_writer().add_summary(summary, global_step, walltime)", + "docstring": "Add scalar data to summary. Args: tag (str): Data identifier scalar_value (float or string/blobname): Value to save global_step (int): Global step value to record walltime (float): Optional override default walltime (time.time()) with seconds after epoch of event new_style (boolean): Whether to use new style (tensor field) or old style (simple_value field). New style could lead to faster data loading. Examples:: from torch.utils.tensorboard import SummaryWriter writer = SummaryWriter() x = range(100) for i in x: writer.add_scalar('y=2x', i * 2, i) writer.close() Expected result: .. image:: _static/img/tensorboard/add_scalar.png :scale: 50 %", + "type": "method", + "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py", + "ast_data": "FunctionDef name:add_scalar arg:self arg:tag arg:scalar_value arg:global_step arg:walltime arg:new_style arg:double_precision arguments arg arg arg arg arg arg arg Call Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "OnnxExporterWarning", + "source_code": "class OnnxExporterWarning(UserWarning):\n pass", + "docstring": "Warnings in the ONNX exporter.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\errors.py", + "ast_data": "ClassDef name:OnnxExporterWarning" + }, + { + "library": "tensorflow", + "name": "apply", + "source_code": "def apply(self, transformation_func) -> 'DatasetV2':\n dataset = transformation_func(self)\n if not isinstance(dataset, data_types.DatasetV2):\n raise TypeError(f'`transformation_func` must return a `tf.data.Dataset` object. Got {type(dataset)}.')\n dataset._input_datasets = [self]\n return dataset", + "docstring": "Applies a transformation function to this dataset. enables chaining of custom transformations, which are represented as functions that take one argument and return a transformed . >>> dataset = tf.data.Dataset.range(100) >>> def dataset_fn(ds): ... return ds.filter(lambda x: x >> dataset = dataset.apply(dataset_fn) >>> [a.item() for a in dataset.as_numpy_iterator()] [0, 1, 2, 3, 4] Args: transformation_func: A function that takes one argument and returns a . 
Returns: A new with the transformation applied as described above.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:apply arg:self arg:transformation_func arguments arg arg Assign Call If Call Raise Call Call Assign Return return:yes" + }, + { + "library": "pygame", + "name": "_parse_font_entry_unix", + "source_code": "def _parse_font_entry_unix(entry, fonts):\n filename, family, style = entry.split(':', 2)\n if splitext(filename)[1].lower() in OpenType_extensions:\n bold = 'Bold' in style\n italic = 'Italic' in style\n oblique = 'Oblique' in style\n for name in family.strip().split(','):\n if name:\n break\n else:\n name = splitext(basename(filename))[0]\n _addfont(_simplename(name), bold, italic or oblique, filename, fonts)", + "docstring": "Parses an entry in the unix font data to add to the pygame font dictionary. :param entry: A entry from the unix font list. :param fonts: The pygame font dictionary to add the parsed font data to.", + "type": "function", + "file_path": "pygame\\src_py\\sysfont.py", + "ast_data": "FunctionDef name:_parse_font_entry_unix arg:entry arg:fonts arguments arg arg Assign Call If Compare Call Call Assign Compare Assign Compare Assign Compare For Call Call If Assign Call Call Call Call BoolOp" + }, + { + "library": "pandas", + "name": "asof_locs", + "source_code": "def asof_locs(self, where: Index, mask: npt.NDArray[np.bool_]) -> np.ndarray:\n if isinstance(where, DatetimeIndex):\n where = PeriodIndex(where._values, freq=self.freq)\n elif not isinstance(where, PeriodIndex):\n raise TypeError('asof_locs `where` must be DatetimeIndex or PeriodIndex')\n return super().asof_locs(where, mask)", + "docstring": "where : array of timestamps mask : np.ndarray[bool] Array of booleans where data is not NA.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\period.py", + "ast_data": "FunctionDef name:asof_locs arg:self arg:where arg:mask arguments arg arg arg If Call Assign Call If Call Raise Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_initial_nodes_a", + "source_code": "def _initial_nodes_a(n, k):\n tauk = _compute_tauk(n, k)\n sigk = cos(0.5 * tauk) ** 2\n a = n % 2 - 0.5\n nu = 4.0 * floor(n / 2.0) + 2.0 * a + 2.0\n xksq = nu * sigk - 1.0 / (3.0 * nu) * (5.0 / (4.0 * (1.0 - sigk) ** 2) - 1.0 / (1.0 - sigk) - 0.25)\n return xksq", + "docstring": "Tricomi initial guesses Computes an initial approximation to the square of the -th (positive) root :math: of the Hermite polynomial :math: of order :math:. The formula is the one from lemma 3.1 in the original paper. The guesses are accurate except in the region near :math:. 
Parameters ---------- n : int Quadrature order k : ndarray of type int Index of roots to compute Returns ------- xksq : ndarray Square of the approximate roots See Also -------- initial_nodes roots_hermite_asy", + "type": "function", + "file_path": "scipy\\scipy\\special\\_orthogonal.py", + "ast_data": "FunctionDef name:_initial_nodes_a arg:n arg:k arguments arg arg Assign Call Assign Call Assign Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "prepare_model_outputs", + "source_code": "def prepare_model_outputs(float_module: nn.Module, q_module: nn.Module, logger_cls=OutputLogger, allow_list=None) -> None:\n torch._C._log_api_usage_once('quantization_api._numeric_suite.prepare_model_outputs')\n if allow_list is None:\n allow_list = get_default_compare_output_module_list()\n qconfig_debug = torch.ao.quantization.QConfig(activation=logger_cls, weight=None)\n float_module.qconfig = qconfig_debug\n prepare(float_module, inplace=True, allow_list=allow_list, prepare_custom_config_dict={})\n q_module.qconfig = qconfig_debug\n prepare(q_module, inplace=True, allow_list=allow_list, observer_non_leaf_module_list=NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST, prepare_custom_config_dict={})", + "docstring": "Prepare the model by attaching the logger to both float module and quantized module if they are in the allow_list. Args: float_module: float module used to generate the q_module q_module: module quantized from float_module logger_cls: type of logger to be attached to float_module and q_module allow_list: list of module types to attach logger", + "type": "function", + "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite.py", + "ast_data": "FunctionDef name:prepare_model_outputs arg:float_module arg:q_module arg:logger_cls arg:allow_list arguments arg arg arg arg Call If Compare Assign Call Assign Call Assign Call Assign Call" + }, + { + "library": "scipy", + "name": "_align_32", + "source_code": "def _align_32(f):\n pos = f.tell()\n if pos % 4 != 0:\n f.seek(pos + 4 - pos % 4)\n return", + "docstring": "Align to the next 32-bit position in a file", + "type": "function", + "file_path": "scipy\\scipy\\io\\_idl.py", + "ast_data": "FunctionDef name:_align_32 arg:f arguments arg Assign Call If Compare Call Return return:no" + }, + { + "library": "pytorch", + "name": "stack_trace", + "source_code": "@property\n@abc.abstractmethod\ndef stack_trace(self) -> str | None:\n ...", + "docstring": "The stack trace associated with this node.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py", + "ast_data": "FunctionDef name:stack_trace arg:self arguments arg" + }, + { + "library": "tensorflow", + "name": "should_cast", + "source_code": "def should_cast(self, v):\n return self._dvariable.save_as_bf16 and v.dtype == dtypes.float32", + "docstring": "Returns True if v has float32 dtype and is intructed to save as bf16. Args: v : The variable that determines whether to cast. 
Returns: True if current savable DVariable is instructed to save as bfloat16 and the variable has dtype float32.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\d_variable.py", + "ast_data": "FunctionDef name:should_cast arg:self arg:v arguments arg arg Return return:yes BoolOp Compare" + }, + { + "library": "numpy", + "name": "descr_to_dtype", + "source_code": "@set_module('numpy.lib.format')\ndef descr_to_dtype(descr):\n if isinstance(descr, str):\n return numpy.dtype(descr)\n elif isinstance(descr, tuple):\n dt = descr_to_dtype(descr[0])\n return numpy.dtype((dt, descr[1]))\n titles = []\n names = []\n formats = []\n offsets = []\n offset = 0\n for field in descr:\n if len(field) == 2:\n name, descr_str = field\n dt = descr_to_dtype(descr_str)\n else:\n name, descr_str, shape = field\n dt = numpy.dtype((descr_to_dtype(descr_str), shape))\n is_pad = name == '' and dt.type is numpy.void and (dt.names is None)\n if not is_pad:\n title, name = name if isinstance(name, tuple) else (None, name)\n titles.append(title)\n names.append(name)\n formats.append(dt)\n offsets.append(offset)\n offset += dt.itemsize\n return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, 'offsets': offsets, 'itemsize': offset})", + "docstring": "Returns a dtype based off the given description. This is essentially the reverse of . It will remove the valueless padding fields created by, i.e. simple fields like dtype('float32'), and then convert the description to its corresponding dtype. Parameters ---------- descr : object The object retrieved by dtype.descr. Can be passed to in order to replicate the input dtype. Returns ------- dtype : dtype The dtype constructed by the description.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_format_impl.py", + "ast_data": "FunctionDef name:descr_to_dtype arg:descr arguments arg If Call Return return:yes Call If Call Assign Call Return return:yes Call Assign Assign Assign Assign Assign For If Compare Call Assign Assign Call Assign Assign Call Call Assign BoolOp Compare Compare Compare If Assign Call Call Call Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "scalar", + "source_code": "def scalar(name, tensor, family=None, step=None):\n\n def function(tag, scope):\n return gen_summary_ops.write_scalar_summary(_summary_state.writer._resource, _choose_step(step), tag, array_ops.identity(tensor), name=scope)\n return summary_writer_function(name, tensor, function, family=family)", + "docstring": "Writes a scalar summary if possible. Unlike this op may change the dtype depending on the writer, for both practical and efficiency concerns. Args: name: An arbitrary name for this summary. tensor: A Must be one of the following types: , , , , , , , , , , . family: Optional, the summary's family. step: The monotonic step variable, which defaults to . 
Returns: The created or a if summary writing has not been enabled for this context.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:scalar arg:name arg:tensor arg:family arg:step arguments arg arg arg arg FunctionDef name:function arg:tag arg:scope arguments arg arg Return return:yes Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "check_axis_name_return_reason", + "source_code": "@staticmethod\ndef check_axis_name_return_reason(name: str, allow_underscore: bool=False) -> tuple[bool, str]:\n if not str.isidentifier(name):\n return (False, 'not a valid python identifier')\n elif name[0] == '_' or name[-1] == '_':\n if name == '_' and allow_underscore:\n return (True, '')\n return (False, 'axis name should should not start or end with underscore')\n else:\n if keyword.iskeyword(name):\n warnings.warn(f'It is discouraged to use axes names that are keywords: {name}', RuntimeWarning)\n if name in ['axis']:\n warnings.warn(\"It is discouraged to use 'axis' as an axis name and will raise an error in future\", FutureWarning)\n return (True, '')", + "docstring": "Check if the given axis name is valid, and a message explaining why if not. Valid axes names are python identifiers except keywords, and should not start or end with an underscore. Args: name (str): the axis name to check allow_underscore (bool): whether axis names are allowed to start with an underscore Returns: tuple[bool, str]: whether the axis name is valid, a message explaining why if not", + "type": "method", + "file_path": "pytorch\\functorch\\einops\\_parsing.py", + "ast_data": "FunctionDef name:check_axis_name_return_reason arg:name arg:allow_underscore arguments arg arg If Call Return return:yes If BoolOp Compare Compare If BoolOp Compare Return return:yes Return return:yes If Call Call If Compare Call Return return:yes" + }, + { + "library": "django", + "name": "backwards_plan", + "source_code": "def backwards_plan(self, target):\n if target not in self.nodes:\n raise NodeNotFoundError('Node %r not a valid node' % (target,), target)\n return self.iterative_dfs(self.node_map[target], forwards=False)", + "docstring": "Given a node, return a list of which dependent nodes (dependencies) must be unapplied, ending with the node itself. This is the list you would follow if removing the migrations from a database.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\graph.py", + "ast_data": "FunctionDef name:backwards_plan arg:self arg:target arguments arg arg If Compare Raise Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "extract_compile_commands", + "source_code": "def extract_compile_commands(parsed_aquery_output: _JSONDict) -> list[CompileCommand]:\n actions = parsed_aquery_output['actions']\n commands = []\n for action in actions:\n command = CompileCommand.from_args_list(action['arguments'])\n commands.append(command)\n return commands", + "docstring": "Gathers compile commands to run from JSON output. Arguments: parsed_aquery_output: Parsed JSON representing the output of . 
Returns: The list of CompileCommands that should be executed.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\build_tools\\lint\\generate_compile_commands.py", + "ast_data": "FunctionDef name:extract_compile_commands arg:parsed_aquery_output arguments arg Assign Assign For Assign Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "initialize_read", + "source_code": "def initialize_read(self):\n self._file_reader = VarReader5(self)\n self._matrix_reader = VarReader5(self)", + "docstring": "Run when beginning read of variables Sets up readers from parameters in", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py", + "ast_data": "FunctionDef name:initialize_read arg:self arguments arg Assign Call Assign Call" + }, + { + "library": "pytorch", + "name": "threshold", + "source_code": "@register_decomposition(aten.threshold)\n@_inplace_wrapper\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('a',), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef threshold(a: TensorLikeType, threshold: NumberType, value: Union[bool, int, float], inplace: bool=False) -> TensorLikeType:\n if inplace:\n raise NotImplementedError\n return torch.where(a <= threshold, value, a)", + "docstring": "Reference implementation of torch.nn.functional.threshold", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py", + "ast_data": "FunctionDef name:threshold arg:a arg:threshold arg:value arg:inplace arguments arg arg arg arg If Raise Return return:yes Call Compare Call Call Call" + }, + { + "library": "matplotlib", + "name": "_Token", + "source_code": "class _Token:\n __slots__ = ('pos', 'raw')\n kind = '?'\n\n def __init__(self, pos, raw):\n _log.debug('type1font._Token %s at %d: %r', self.kind, pos, raw)\n self.pos = pos\n self.raw = raw\n\n def __str__(self):\n return f'<{self.kind} {self.raw} @{self.pos}>'\n\n def endpos(self):\n return self.pos + len(self.raw)\n\n def is_keyword(self, *names):\n return False\n\n def is_slash_name(self):\n return False\n\n def is_delim(self):\n return False\n\n def is_number(self):\n return False\n\n def value(self):\n return self.raw", + "docstring": "A token in a PostScript stream. Attributes ---------- pos : int Position, i.e. offset from the beginning of the data. raw : str Raw text of the token. 
kind : str Description of the token (for debugging or testing).", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\_type1font.py", + "ast_data": "ClassDef name:_Token Assign Assign FunctionDef name:__init__ arg:self arg:pos arg:raw arguments arg arg arg Call Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:endpos arg:self arguments arg Return return:yes Call FunctionDef name:is_keyword arg:self arguments arg arg Return return:yes FunctionDef name:is_slash_name arg:self arguments arg Return return:yes FunctionDef name:is_delim arg:self arguments arg Return return:yes FunctionDef name:is_number arg:self arguments arg Return return:yes FunctionDef name:value arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "coords", + "source_code": "def coords(self, device_idx: int) -> tensor.Tensor:\n strides = ops.convert_to_tensor(self.strides)\n shape = ops.convert_to_tensor(self.shape())\n return device_idx // strides % shape", + "docstring": "Converts the device index into a tensor of mesh coordinates.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py", + "ast_data": "FunctionDef name:coords arg:self arg:device_idx arguments arg arg Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, name=None):\n filenames = _create_or_validate_filenames_dataset(filenames, name=name)\n self._filenames = filenames\n self._compression_type = compression_type\n self._buffer_size = buffer_size\n\n def creator_fn(filename):\n return _TextLineDataset(filename, compression_type, buffer_size, name=name)\n self._impl = _create_dataset_reader(creator_fn, filenames, num_parallel_reads, name=name)\n variant_tensor = self._impl._variant_tensor\n super(TextLineDatasetV2, self).__init__(variant_tensor)", + "docstring": "Creates a . The elements of the dataset will be the lines of the input files, using the newline character '\\n' to denote line splits. The newline characters will be stripped off of each element. Args: filenames: A whose elements are scalars, a tensor, or a value that can be converted to a tensor (such as a list of Python strings). compression_type: (Optional.) A scalar evaluating to one of (no compression), , or . buffer_size: (Optional.) A scalar denoting the number of bytes to buffer. A value of 0 results in the default buffering values chosen based on the compression type. num_parallel_reads: (Optional.) A scalar representing the number of files to read in parallel. If greater than one, the records of files read in parallel are outputted in an interleaved order. If your input pipeline is I/O bottlenecked, consider setting this parameter to a value greater than one to parallelize the I/O. If , files will be read sequentially. name: (Optional.) 
A name for the tf.data operation.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:filenames arg:compression_type arg:buffer_size arg:num_parallel_reads arg:name arguments arg arg arg arg arg arg Assign Call Assign Assign Assign FunctionDef name:creator_fn arg:filename arguments arg Return return:yes Call Assign Call Assign Call Call" + }, + { + "library": "matplotlib", + "name": "anchored", + "source_code": "def anchored(self, c, container):\n l, b, w, h = container.bounds\n L, B, W, H = self.bounds\n cx, cy = self.coefs[c] if isinstance(c, str) else c\n return Bbox(self._points + [l + cx * (w - W) - L, b + cy * (h - H) - B])", + "docstring": "Return a copy of the anchored to *c* within *container*. Parameters ---------- c : (float, float) or {'C', 'SW', 'S', 'SE', 'E', 'NE', ...} Either an (*x*, *y*) pair of relative coordinates (0 is left or bottom, 1 is right or top), 'C' (center), or a cardinal direction ('SW', southwest, is bottom left, etc.). container : The box within which the is positioned. See Also -------- .Axes.set_anchor", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:anchored arg:self arg:c arg:container arguments arg arg arg Assign Assign Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_default_group_nodes_for_combo_kernels", + "source_code": "@staticmethod\ndef _default_group_nodes_for_combo_kernels(scheduler: Scheduler) -> list[list[BaseSchedulerNode]]:\n sorted_nodes = scheduler._topological_sort_nodes()\n grouped_nodes = []\n max_num_nodes = 8\n for nodes in sorted_nodes:\n grouped_nodes.extend([nodes[i:i + max_num_nodes] for i in range(0, len(nodes), max_num_nodes)])\n return grouped_nodes", + "docstring": "Returns a list of lists of nodes that are to be grouped together.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:_default_group_nodes_for_combo_kernels arg:scheduler arguments arg Assign Call Assign Assign For Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_default_dynamic_quant_module_mappings", + "source_code": "def get_default_dynamic_quant_module_mappings() -> dict[Callable, Any]:\n return DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS", + "docstring": "Get module mapping for post training dynamic quantization", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py", + "ast_data": "FunctionDef name:get_default_dynamic_quant_module_mappings arguments Return return:yes" + }, + { + "library": "django", + "name": "add_action", + "source_code": "def add_action(self, action, name=None):\n name = name or action.__name__\n self._actions[name] = action\n self._global_actions[name] = action", + "docstring": "Register an action to be available globally.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\sites.py", + "ast_data": "FunctionDef name:add_action arg:self arg:action arg:name arguments arg arg arg Assign BoolOp Assign Assign" + }, + { + "library": "pandas", + "name": "_wrap_decimal_thousands", + "source_code": "def _wrap_decimal_thousands(formatter: Callable, decimal: str, thousands: str | None) -> Callable:\n\n def wrapper(x):\n if is_float(x) or is_integer(x) or is_complex(x):\n if decimal != '.' 
and thousands is not None and (thousands != ','):\n return formatter(x).replace(',', '§_§-').replace('.', decimal).replace('§_§-', thousands)\n elif decimal != '.' and (thousands is None or thousands == ','):\n return formatter(x).replace('.', decimal)\n elif decimal == '.' and thousands is not None and (thousands != ','):\n return formatter(x).replace(',', thousands)\n return formatter(x)\n return wrapper", + "docstring": "Takes a string formatting function and wraps logic to deal with thousands and decimal parameters, in the case that they are non-standard and that the input is a (float, complex, int).", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\style_render.py", + "ast_data": "FunctionDef name:_wrap_decimal_thousands arg:formatter arg:decimal arg:thousands arguments arg arg arg FunctionDef name:wrapper arg:x arguments arg If BoolOp Call Call Call If BoolOp Compare Compare Compare Return return:yes Call Call Call Call If BoolOp Compare BoolOp Compare Compare Return return:yes Call Call If BoolOp Compare Compare Compare Return return:yes Call Call Return return:yes Call Return return:yes" + }, + { + "library": "scipy", + "name": "ifftn", + "source_code": "def ifftn(x, shape=None, axes=None, overwrite_x=False):\n shape = _good_shape(x, shape, axes)\n return _pocketfft.ifftn(x, shape, axes, None, overwrite_x)", + "docstring": "Return inverse multidimensional discrete Fourier transform. The sequence can be of an arbitrary type. The returned array contains:: y[j_1,..,j_d] = 1/p * sum[k_1=0..n_1-1, ..., k_d=0..n_d-1] x[k_1,..,k_d] * prod[i=1..d] exp(sqrt(-1)*2*pi/n_i * j_i * k_i) where `fftn`. See Also -------- fftn : for detailed information. Examples -------- >>> from scipy.fftpack import fftn, ifftn >>> import numpy as np >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16)) >>> np.allclose(y, ifftn(fftn(y))) True", + "type": "function", + "file_path": "scipy\\scipy\\fftpack\\_basic.py", + "ast_data": "FunctionDef name:ifftn arg:x arg:shape arg:axes arg:overwrite_x arguments arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_get_empty_routing", + "source_code": "def _get_empty_routing(self):\n return Bunch(**{name: Bunch(**{method: {} for method in METHODS}) for name, step, _, _ in self._iter(fitted=False, column_as_labels=False, skip_drop=True, skip_empty_columns=True)})", + "docstring": "Return empty routing. Used while routing can be disabled. TODO: Remove when `` is no more an option.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py", + "ast_data": "FunctionDef name:_get_empty_routing arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "get_uid", + "source_code": "def get_uid(prefix=''):\n graph = get_graph()\n if graph not in PER_GRAPH_OBJECT_NAME_UIDS:\n PER_GRAPH_OBJECT_NAME_UIDS[graph] = collections.defaultdict(int)\n layer_name_uids = PER_GRAPH_OBJECT_NAME_UIDS[graph]\n layer_name_uids[prefix] += 1\n return layer_name_uids[prefix]", + "docstring": "Associates a string prefix with an integer counter in a TensorFlow graph. Args: prefix: String prefix to index. Returns: Unique integer ID. 
Example: >>> get_uid('dense') 1 >>> get_uid('dense') 2", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:get_uid arg:prefix arguments arg Assign Call If Compare Assign Call Assign Return return:yes" + }, + { + "library": "sphinx", + "name": "_Inventory", + "source_code": "class _Inventory:\n __slots__ = ('data',)\n data: dict[str, dict[str, _InventoryItem]]\n\n def __init__(self, data: dict[str, dict[str, _InventoryItem]], /) -> None:\n self.data: dict[str, dict[str, _InventoryItem]] = data\n\n def __repr__(self) -> str:\n return f'_Inventory({self.data!r})'\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, _Inventory):\n return NotImplemented\n return self.data == other.data\n\n def __hash__(self) -> int:\n return hash(self.data)\n\n def __getitem__(self, item: tuple[str, str]) -> _InventoryItem:\n obj_type, name = item\n return self.data.setdefault(obj_type, {})[name]\n\n def __setitem__(self, item: tuple[str, str], value: _InventoryItem) -> None:\n obj_type, name = item\n self.data.setdefault(obj_type, {})[name] = value\n\n def __contains__(self, item: tuple[str, str]) -> bool:\n obj_type, name = item\n return obj_type in self.data and name in self.data[obj_type]", + "docstring": "Inventory data in memory.", + "type": "class", + "file_path": "sphinx\\sphinx\\util\\inventory.py", + "ast_data": "ClassDef name:_Inventory Assign FunctionDef name:__init__ arguments arg arg FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:item arguments arg arg Assign Return return:yes Call FunctionDef name:__setitem__ arg:self arg:item arg:value arguments arg arg arg Assign Assign Call FunctionDef name:__contains__ arg:self arg:item arguments arg arg Assign Return return:yes BoolOp Compare Compare" + }, + { + "library": "pytorch", + "name": "module", + "source_code": "@property\ndef module(self) -> nn.Module:\n if isinstance(self._fsdp_wrapped_module, ActivationWrapper):\n return getattr(self._fsdp_wrapped_module, _CHECKPOINT_WRAPPED_MODULE)\n return self._fsdp_wrapped_module", + "docstring": "Return the wrapped module.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py", + "ast_data": "FunctionDef name:module arg:self arguments arg If Call Return return:yes Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "urljoin_bytes", + "source_code": "def urljoin_bytes(*atoms):\n url = b'/'.join([x for x in atoms if x])\n while b'//' in url:\n url = url.replace(b'//', b'/')\n return url or b'/'", + "docstring": "Return the given path , joined into a single URL. 
This will correctly join a SCRIPT_NAME and PATH_INFO into the original URL, even if either atom is blank.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", + "ast_data": "FunctionDef name:urljoin_bytes arguments arg Assign Call While Compare Assign Call Return return:yes BoolOp" + }, + { + "library": "tensorflow", + "name": "resolve_input", + "source_code": "def resolve_input(self, input_name):\n name_elts = input_name.split(':')\n source_name = name_elts[0]\n if source_name[0] == '^':\n source_name = source_name[1:]\n source_index = 0\n if len(name_elts) > 1 and name_elts[-1].isnumeric():\n source_index = int(name_elts[-1])\n if self._function is None:\n return _EndPoint(self._enclosing_graph.nodes[source_name], source_index)\n if source_index != 0 or source_name in self._function.nodes:\n return _EndPoint(self._function.nodes[source_name], source_index)\n inputs = [i.name for i in self._function.function.signature.input_arg]\n return _EndPoint(self._function, inputs.index(source_name))", + "docstring": "Resolves an input into its _EndPoint. A NodeDef's input name can refer to either global NodeDefs (in the GraphDef's node list), a NodeDef in a function's node list, or a Function (in the GraphDef's function library). The name can also carry semantic information, depending on whether it starts with \"^\". This method handles all that logic in order to find the object to which the input name refers to. Args: input_name: The input name to resolve. Returns: The object referred to by 'input_name'.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", + "ast_data": "FunctionDef name:resolve_input arg:self arg:input_name arguments arg arg Assign Call Assign If Compare Assign Assign If BoolOp Compare Call Call Assign Call If Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call Assign Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "convert_graph_def", + "source_code": "@tf_export('mlir.experimental.convert_graph_def')\ndef convert_graph_def(graph_def, pass_pipeline='tf-standard-pipeline', show_debug_info=False):\n return pywrap_mlir.import_graphdef(graph_def, pass_pipeline, show_debug_info)", + "docstring": "Import a GraphDef and convert it to a textual MLIR module. This API is only intended for inspecting the internals of TensorFlow and the string returned is at the moment intended for debugging purposes. Args: graph_def: An object of type graph_pb2.GraphDef or a textual proto representation of a valid GraphDef. pass_pipeline: A textual description of an MLIR Pass Pipeline to run on the module, see MLIR documentation for the [textual pass pipeline syntax]( show_debug_info: Whether to include locations in the emitted textual form. Returns: A textual representation of the MLIR module corresponding to the graphdef. 
Raises: InvalidArgumentError: if graph_def is invalid or cannot be converted to MLIR.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\mlir\\mlir.py", + "ast_data": "FunctionDef name:convert_graph_def arg:graph_def arg:pass_pipeline arg:show_debug_info arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "set_synchronous_execution", + "source_code": "@tf_export('config.experimental.set_synchronous_execution')\ndef set_synchronous_execution(enable):\n if enable is None:\n context.context().execution_mode = None\n elif enable:\n context.context().execution_mode = context.SYNC\n else:\n context.context().execution_mode = context.ASYNC", + "docstring": "Specifies whether operations are executed synchronously or asynchronously. TensorFlow can execute operations synchronously or asynchronously. If asynchronous execution is enabled, operations may return \"non-ready\" handles. When is set to None, an appropriate value will be picked automatically. The value picked may change between TensorFlow releases. Args: enable: Whether operations should be dispatched synchronously. Valid values: - None: sets the system default. - True: executes each operation synchronously. - False: executes each operation asynchronously.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py", + "ast_data": "FunctionDef name:set_synchronous_execution arg:enable arguments arg If Compare Assign Call If Assign Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "FunctionalModule", + "source_code": "class FunctionalModule(nn.Module):\n\n def __init__(self, stateless_model: nn.Module, param_names: tuple[str, ...], names_map: dict[str, list[str]]) -> None:\n super().__init__()\n self.stateless_model = stateless_model\n self.param_names = param_names\n self.names_map = names_map\n\n @staticmethod\n def _create_from(model: nn.Module, disable_autograd_tracking: bool=False) -> tuple['FunctionalModule', tuple[Tensor, ...]]:\n model_copy = copy.deepcopy(model)\n params, param_names, names_map = extract_weights(model_copy)\n if disable_autograd_tracking:\n for param in params:\n param.requires_grad_(False)\n return (FunctionalModule(model_copy, param_names, names_map), params)\n\n def forward(self, params: Iterable[Tensor], *args, **kwargs) -> Any:\n old_state = _swap_state(self.stateless_model, self.names_map, params)\n try:\n return self.stateless_model(*args, **kwargs)\n finally:\n _swap_state(self.stateless_model, self.names_map, old_state)", + "docstring": "This is the callable object returned by :func:.", + "type": "class", + "file_path": "pytorch\\torch\\_functorch\\make_functional.py", + "ast_data": "ClassDef name:FunctionalModule FunctionDef name:__init__ arg:self arg:stateless_model arg:param_names arg:names_map arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:_create_from arg:model arg:disable_autograd_tracking arguments arg arg Assign Call Assign Call If For Call Return return:yes Call FunctionDef name:forward arg:self arg:params arguments arg arg arg arg Assign Call Try Return return:yes Call Call" + }, + { + "library": "django", + "name": "sys_path_directories", + "source_code": "def sys_path_directories():\n for path in sys.path:\n path = Path(path)\n if not path.exists():\n continue\n resolved_path = path.resolve().absolute()\n if resolved_path.is_file():\n yield resolved_path.parent\n else:\n yield resolved_path", + "docstring": "Yield absolute directories from 
sys.path, ignoring entries that don't exist.", + "type": "function", + "file_path": "django\\django\\utils\\autoreload.py", + "ast_data": "FunctionDef name:sys_path_directories arguments For Assign Call If Call Assign Call Call If Call" + }, + { + "library": "pytorch", + "name": "get_output_file_path", + "source_code": "def get_output_file_path(self) -> Optional[str]:\n if self.output_file_path:\n return self.output_file_path\n else:\n return None", + "docstring": "Returns the output file name or None.", + "type": "method", + "file_path": "pytorch\\torch\\profiler\\profiler.py", + "ast_data": "FunctionDef name:get_output_file_path arg:self arguments arg If Return return:yes Return return:no" + }, + { + "library": "matplotlib", + "name": "label_outer", + "source_code": "def label_outer(self, remove_inner_ticks=False):\n self._label_outer_xaxis(skip_non_rectangular_axes=False, remove_inner_ticks=remove_inner_ticks)\n self._label_outer_yaxis(skip_non_rectangular_axes=False, remove_inner_ticks=remove_inner_ticks)", + "docstring": "Only show \"outer\" labels and tick labels. x-labels are only kept for subplots on the last row (or first row, if labels are on the top side); y-labels only for subplots on the first column (or last column, if labels are on the right side). Parameters ---------- remove_inner_ticks : bool, default: False If True, remove the inner ticks as well (not only tick labels). .. versionadded:: 3.8", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:label_outer arg:self arg:remove_inner_ticks arguments arg arg Call Call" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, on_exit: OnExitType):\n self._on_exit = on_exit\n self._metrics: dict[str, Any] = {}\n self._start_time_ns: int = 0\n self._level: int = 0", + "docstring": "Use this class as a contextmanager to create a context under which to accumulate a set of metrics, e.g., metrics gathered during a compilation. On exit of the contextmanager, call the provided 'on_exit' function and pass a dictionary of all metrics set during the lifetime of the contextmanager.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:on_exit arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "cumsum", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef cumsum(x, axis=0):\n return math_ops.cumsum(x, axis=axis)", + "docstring": "Cumulative sum of the values in a tensor, alongside the specified axis. Args: x: A tensor or variable. axis: An integer, the axis to compute the sum. Returns: A tensor of the cumulative sum of values of along .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:cumsum arg:x arg:axis arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_initialized_tpu_systems", + "source_code": "def get_initialized_tpu_systems():\n return _INITIALIZED_TPU_SYSTEMS.copy()", + "docstring": "Returns all currently initialized tpu systems. 
Returns: A dictionary, with tpu name as the key and the tpu topology as the value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_strategy_util.py", + "ast_data": "FunctionDef name:get_initialized_tpu_systems arguments Return return:yes Call" + }, + { + "library": "django", + "name": "write_pot_file", + "source_code": "def write_pot_file(potfile, msgs):\n pot_lines = msgs.splitlines()\n if os.path.exists(potfile):\n lines = dropwhile(len, pot_lines)\n else:\n lines = []\n found, header_read = (False, False)\n for line in pot_lines:\n if not found and (not header_read):\n if 'charset=CHARSET' in line:\n found = True\n line = line.replace('charset=CHARSET', 'charset=UTF-8')\n if not line and (not found):\n header_read = True\n lines.append(line)\n msgs = '\\n'.join(lines)\n with open(potfile, 'a', encoding='utf-8', newline='\\n') as fp:\n fp.write(msgs)", + "docstring": "Write the with the contents, making sure its format is valid.", + "type": "function", + "file_path": "django\\django\\core\\management\\commands\\makemessages.py", + "ast_data": "FunctionDef name:write_pot_file arg:potfile arg:msgs arguments arg arg Assign Call If Call Assign Call Assign Assign For If BoolOp If Compare Assign Assign Call If BoolOp Assign Call Assign Call With Call Call" + }, + { + "library": "tensorflow", + "name": "_ensure_recording", + "source_code": "@tf_contextlib.contextmanager\ndef _ensure_recording(self):\n if not self._recording:\n try:\n self._push_tape()\n yield\n finally:\n self._pop_tape()\n else:\n yield", + "docstring": "Ensures that this tape is recording.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py", + "ast_data": "FunctionDef name:_ensure_recording arg:self arguments arg If Try Call Call" + }, + { + "library": "tensorflow", + "name": "experimental_from_jax", + "source_code": "@classmethod\n@_deprecation.deprecated(None, 'Use `jax2tf.convert` and (`lite.TFLiteConverter.from_saved_model` or `lite.TFLiteConverter.from_concrete_functions`) instead.')\ndef experimental_from_jax(cls, serving_funcs, inputs):\n TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.JAX)\n return TFLiteJaxConverterV2(serving_funcs, inputs)", + "docstring": "Creates a TFLiteConverter object from a Jax model with its inputs. Args: serving_funcs: An array of Jax functions with all the weights applied already. inputs: An array of Jax input placeholders tuples list, e.g., jnp.zeros(INPUT_SHAPE). Each tuple list should correspond with the serving function. Returns: TFLiteConverter object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:experimental_from_jax arg:cls arg:serving_funcs arg:inputs arguments arg arg arg Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "set_rlim", + "source_code": "def set_rlim(self, bottom=None, top=None, *, emit=True, auto=False, **kwargs):\n if 'rmin' in kwargs:\n if bottom is None:\n bottom = kwargs.pop('rmin')\n else:\n raise ValueError('Cannot supply both positional \"bottom\"argument and kwarg \"rmin\"')\n if 'rmax' in kwargs:\n if top is None:\n top = kwargs.pop('rmax')\n else:\n raise ValueError('Cannot supply both positional \"top\"argument and kwarg \"rmax\"')\n return self.set_ylim(bottom=bottom, top=top, emit=emit, auto=auto, **kwargs)", + "docstring": "Set the radial axis view limits. 
This function behaves like , but additionally supports *rmin* and *rmax* as aliases for *bottom* and *top*. See Also -------- .Axes.set_ylim", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py", + "ast_data": "FunctionDef name:set_rlim arg:self arg:bottom arg:top arguments arg arg arg arg arg arg If Compare If Compare Assign Call Raise Call If Compare If Compare Assign Call Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_handle_to_backward_prefetch", + "source_code": "def get_handle_to_backward_prefetch(self, current_handle: FlatParamHandle) -> Optional[FlatParamHandle]:\n current_index = current_handle._post_forward_index\n if current_index is None:\n return None\n target_index = current_index - 1\n target_handle: Optional[FlatParamHandle] = None\n for _ in range(self._backward_prefetch_limit):\n if target_index < 0:\n break\n target_handle = self.handles_post_forward_order[target_index]\n target_index -= 1\n return target_handle", + "docstring": "Returns a :class: of the handles keys of the handles to backward prefetch given the current handles key. If there are no valid handles keys to prefetch, then this returns an empty :class:.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_exec_order_utils.py", + "ast_data": "FunctionDef name:get_handle_to_backward_prefetch arg:self arg:current_handle arguments arg arg Assign If Compare Return return:no Assign For Call If Compare Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "DummyModule", + "source_code": "class DummyModule:\n\n def __init__(self) -> None:\n pass\n\n def call(self, *args: Any, **kwargs: Any) -> None:\n pass", + "docstring": "This is empty to replace the generated triton module", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\utils.py", + "ast_data": "ClassDef name:DummyModule FunctionDef name:__init__ arg:self arguments arg FunctionDef name:call arg:self arguments arg arg arg" + }, + { + "library": "scrapy", + "name": "send_catch_log", + "source_code": "def send_catch_log(self, signal: Any, **kwargs: Any) -> list[tuple[Any, Any]]:\n kwargs.setdefault('sender', self.sender)\n return _signal.send_catch_log(signal, **kwargs)", + "docstring": "Send a signal, catch exceptions and log them. The keyword arguments are passed to the signal handlers (connected through the :meth: method).", + "type": "method", + "file_path": "scrapy\\scrapy\\signalmanager.py", + "ast_data": "FunctionDef name:send_catch_log arg:self arg:signal arguments arg arg arg Call Return return:yes Call" + }, + { + "library": "scrapy", + "name": "create_instance", + "source_code": "def create_instance(objcls, settings, crawler, *args, **kwargs):\n warnings.warn('The create_instance() function is deprecated. 
Please use build_from_crawler() instead.', category=ScrapyDeprecationWarning, stacklevel=2)\n if settings is None:\n if crawler is None:\n raise ValueError('Specify at least one of settings and crawler.')\n settings = crawler.settings\n if crawler and hasattr(objcls, 'from_crawler'):\n instance = objcls.from_crawler(crawler, *args, **kwargs)\n method_name = 'from_crawler'\n elif hasattr(objcls, 'from_settings'):\n instance = objcls.from_settings(settings, *args, **kwargs)\n method_name = 'from_settings'\n else:\n instance = objcls(*args, **kwargs)\n method_name = '__new__'\n if instance is None:\n raise TypeError(f'{objcls.__qualname__}.{method_name} returned None')\n return instance", + "docstring": "Construct a class instance using its `` (e.g. if an extension has not been implemented correctly).", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\misc.py", + "ast_data": "FunctionDef name:create_instance arg:objcls arg:settings arg:crawler arguments arg arg arg arg arg Call If Compare If Compare Raise Call Assign If BoolOp Call Assign Call Assign If Call Assign Call Assign Assign Call Assign If Compare Raise Call Return return:yes" + }, + { + "library": "scipy", + "name": "fft", + "source_code": "def fft(x, n=None, axis=-1, overwrite_x=False):\n return _pocketfft.fft(x, n, axis, None, overwrite_x)", + "docstring": "Return discrete Fourier transform of real or complex sequence. The returned complex array contains `xxfftshiftnnxrfftdct` can again double the efficiency by generating half of the spectrum from half of the signal. Examples -------- >>> import numpy as np >>> from scipy.fftpack import fft, ifft >>> x = np.arange(5) >>> np.allclose(fft(ifft(x)), x, atol=1e-15) # within numerical accuracy. True", + "type": "function", + "file_path": "scipy\\scipy\\fftpack\\_basic.py", + "ast_data": "FunctionDef name:fft arg:x arg:n arg:axis arg:overwrite_x arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "DisableBreakpoints", + "source_code": "class DisableBreakpoints:\n\n def __enter__(self) -> None:\n self.disabled_breakpoints = []\n for b in gdb.breakpoints():\n if b.enabled:\n b.enabled = False\n self.disabled_breakpoints.append(b)\n\n def __exit__(self, etype: Any, evalue: Any, tb: Any) -> None:\n for b in self.disabled_breakpoints:\n b.enabled = True", + "docstring": "Context-manager to temporarily disable all gdb breakpoints, useful if there is a risk to hit one during the evaluation of one of our custom commands", + "type": "class", + "file_path": "pytorch\\tools\\gdb\\pytorch-gdb.py", + "ast_data": "ClassDef name:DisableBreakpoints FunctionDef name:__enter__ arg:self arguments arg Assign For Call If Assign Call FunctionDef name:__exit__ arg:self arg:etype arg:evalue arg:tb arguments arg arg arg arg For Assign" + }, + { + "library": "pytorch", + "name": "LiveRanges", + "source_code": "class LiveRanges:\n\n def __init__(self, ranges: Iterable[LiveRange]):\n ranges = [*sorted(ranges, key=lambda x: x.begin)]\n self.ranges = ranges[:1]\n for r in ranges[1:]:\n assert self.ranges[-1].begin <= r.begin\n if self.ranges[-1].end >= r.begin:\n self.ranges[-1] = LiveRange.join(self.ranges[-1], r)\n else:\n self.ranges.append(r)\n\n def overlaps(self, other: LiveRanges):\n left = collections.deque(self.ranges)\n right = collections.deque(other.ranges)\n while left and right:\n if left[0].begin > right[0].begin:\n left, right = (right, left)\n assert left[0].begin <= right[0].begin\n if left[0].end > right[0].begin:\n return True\n 
left.popleft()\n return False\n\n @property\n def begin(self):\n return self.ranges[0].begin\n\n @property\n def end(self):\n return self.ranges[-1].end\n\n def __repr__(self):\n return f'{self.__class__.__name__}([{', '.join(map(repr, self.ranges))}])'", + "docstring": "A collection of LiveRange regions, allowing for non-contiguous live regions. Invariant: LiveRanges.ranges is in sorted order and non-overlapping", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py", + "ast_data": "ClassDef name:LiveRanges FunctionDef name:__init__ arg:self arg:ranges arguments arg arg Assign Call arguments arg Assign For Compare If Compare Assign Call Call FunctionDef name:overlaps arg:self arg:other arguments arg arg Assign Call Assign Call While BoolOp If Compare Assign Compare If Compare Return return:yes Call Return return:yes FunctionDef name:begin arg:self arguments arg Return return:yes FunctionDef name:end arg:self arguments arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n X = validate_data(self, X, ensure_min_samples=2, estimator='MinCovDet')\n random_state = check_random_state(self.random_state)\n n_samples, n_features = X.shape\n if (linalg.svdvals(np.dot(X.T, X)) > 1e-08).sum() != n_features:\n warnings.warn('The covariance matrix associated to your dataset is not full rank')\n raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(X, support_fraction=self.support_fraction, cov_computation_method=self._nonrobust_covariance, random_state=random_state)\n if self.assume_centered:\n raw_location = np.zeros(n_features)\n raw_covariance = self._nonrobust_covariance(X[raw_support], assume_centered=True)\n precision = linalg.pinvh(raw_covariance)\n raw_dist = np.sum(np.dot(X, precision) * X, 1)\n self.raw_location_ = raw_location\n self.raw_covariance_ = raw_covariance\n self.raw_support_ = raw_support\n self.location_ = raw_location\n self.support_ = raw_support\n self.dist_ = raw_dist\n self.correct_covariance(X)\n self.reweight_covariance(X)\n return self", + "docstring": "Fit a Minimum Covariance Determinant with the FastMCD algorithm. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. 
Returns ------- self : object Returns the instance itself.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\covariance\\_robust_covariance.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Call Assign If Compare Call Compare Call Call Call Assign Call If Assign Call Assign Call Assign Call Assign Call Call Assign Assign Assign Assign Assign Assign Call Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "parse", + "source_code": "def parse(self) -> None:\n while True:\n token = self.fetch_token()\n if token is None:\n break\n if token == COMMENT:\n pass\n elif token == [OP, '@'] and (self.previous is None or self.previous.match(NEWLINE, NL, INDENT, DEDENT)):\n if self.decorator is None:\n self.decorator = token\n elif token.match([NAME, 'class']):\n self.parse_definition('class')\n elif token.match([NAME, 'def']):\n self.parse_definition('def')\n elif token == INDENT:\n self.indents.append(('other', None, None))\n elif token == DEDENT:\n self.finalize_block()", + "docstring": "Parse the code to obtain location of definitions.", + "type": "method", + "file_path": "sphinx\\sphinx\\pycode\\parser.py", + "ast_data": "FunctionDef name:parse arg:self arguments arg While Assign Call If Compare If Compare If BoolOp Compare BoolOp Compare Call If Compare Assign If Call Call If Call Call If Compare Call If Compare Call" + }, + { + "library": "scipy", + "name": "yeojohnson", + "source_code": "def yeojohnson(x, lmbda=None):\n x = np.asarray(x)\n if x.size == 0:\n return x\n if np.issubdtype(x.dtype, np.complexfloating):\n raise ValueError('Yeo-Johnson transformation is not defined for complex numbers.')\n if np.issubdtype(x.dtype, np.integer):\n x = x.astype(np.float64, copy=False)\n if lmbda is not None:\n return _yeojohnson_transform(x, lmbda)\n lmax = yeojohnson_normmax(x)\n y = _yeojohnson_transform(x, lmax)\n return (y, lmax)", + "docstring": "Return a dataset transformed by a Yeo-Johnson power transformation. Parameters ---------- x : ndarray Input array. Should be 1-dimensional. lmbda : float, optional If `lmbdayeojohnson` to transform the data so it's closest to normal: >>> ax2 = fig.add_subplot(212) >>> xt, lmbda = stats.yeojohnson(x) >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2) >>> ax2.set_title('Probplot after Yeo-Johnson transformation') >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_morestats.py", + "ast_data": "FunctionDef name:yeojohnson arg:x arg:lmbda arguments arg arg Assign Call If Compare Return return:yes If Call Raise Call If Call Assign Call If Compare Return return:yes Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_indexed_case_helper", + "source_code": "def _indexed_case_helper(branch_fns, default, branch_index, name, lower_using_switch_merge=None):\n branch_fns = _indexed_case_verify_and_canonicalize_args(branch_fns, default, branch_index)\n with ops.name_scope(name, 'case', [branch_index]):\n if context.executing_eagerly() and (not hasattr(branch_index, 'graph')):\n branch_index = array_ops.where(math_ops.less(branch_index, 0) | math_ops.greater_equal(branch_index, len(branch_fns)), len(branch_fns) - 1, branch_index)\n return branch_fns[int(branch_index)]()\n return cond_v2.indexed_case(branch_index, branch_fns, lower_using_switch_merge=lower_using_switch_merge)", + "docstring": "Implementation of case that emits the n-way indexed Case op. 
Args: branch_fns: Dict or list of pairs of a boolean scalar tensor, and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. branch_index: Optional int , which selects for the corresponding pred_fn_pair. name: A name for this operation (optional). lower_using_switch_merge: Lower this op using switch merge ops (optional). Returns: The tensors returned by the pair whose key matched branch_index, or those returned by if none does. Raises: TypeError: If is not a list/dictionary. TypeError: If is a list but does not contain 2-tuples or callables. TypeError: If is not callable for any i, or is not callable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_switch_case.py", + "ast_data": "FunctionDef name:_indexed_case_helper arg:branch_fns arg:default arg:branch_index arg:name arg:lower_using_switch_merge arguments arg arg arg arg arg Assign Call With Call If BoolOp Call Call Assign Call Call Call Call Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "codes", + "source_code": "@property\ndef codes(self) -> np.ndarray:\n v = self._codes.view()\n v.flags.writeable = False\n return v", + "docstring": "The category codes of this categorical index. Codes are an array of integers which are the positions of the actual values in the categories array. There is no setter, use the other categorical methods and the normal item setter to change values in the categorical. Returns ------- ndarray[int] A non-writable view of the `pandas.Categoricalpandas.CategoricalIndex`: >>> ci = pd.CategoricalIndex([\"a\", \"b\", \"c\", \"a\", \"b\", \"c\"]) >>> ci.codes array([0, 1, 2, 0, 1, 2], dtype=int8) >>> ci = pd.CategoricalIndex([\"a\", \"c\"], categories=[\"c\", \"b\", \"a\"]) >>> ci.codes array([2, 0], dtype=int8)", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:codes arg:self arguments arg Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, feature_config: Union[tpu_embedding_v2_utils.FeatureConfig, Iterable], optimizer: Optional[tpu_embedding_v2_utils._Optimizer]=None):\n self._feature_config = feature_config\n self._output_shapes = []\n for feature in nest.flatten(feature_config):\n self._output_shapes.append(feature.output_shape)\n self._table_config = []\n for feature in nest.flatten(feature_config):\n if feature.table not in self._table_config:\n self._table_config.append(feature.table)\n table_names = []\n for i, table in enumerate(self._table_config):\n if table.optimizer is None:\n table.optimizer = optimizer\n if table.optimizer is not None and (not isinstance(table.optimizer, tpu_embedding_v2_utils._Optimizer)):\n raise ValueError('{} is an unsupported optimizer class. Please pass an instance of one of the optimizer classes under tf.tpu.experimental.embedding.'.format(type(table.optimizer)))\n if table.name is None:\n table.name = 'table_{}'.format(i)\n if table.name in table_names:\n raise ValueError(f'Tables must have a unique name. 
Multiple tables with name {table.name} found.')\n table_names.append(table.name)\n self._built = False", + "docstring": "Creates the TPUEmbeddingBase object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_base.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:feature_config arg:optimizer arguments arg arg arg Assign Assign For Call Call Assign For Call If Compare Call Assign For Call If Compare Assign If BoolOp Compare Call Raise Call Call Call If Compare Assign Call If Compare Raise Call Call Assign" + }, + { + "library": "pytorch", + "name": "register_fsdp", + "source_code": "def register_fsdp(self, fsdp: FullyShardedDataParallel) -> None:\n raise NotImplementedError(f'{self.__class__.__name__} does not support overlapped FSDP.')", + "docstring": "Register the overlapped optimizer with FSDP.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\algorithms\\_optimizer_overlap\\optimizer_overlap.py", + "ast_data": "FunctionDef name:register_fsdp arg:self arg:fsdp arguments arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "incomplete_size", + "source_code": "def incomplete_size(self, name=None):\n if name is None:\n name = '%s_BarrierIncompleteSize' % self._name\n return gen_data_flow_ops.barrier_incomplete_size(self._barrier_ref, name=name)", + "docstring": "Compute the number of incomplete elements in the given barrier. Args: name: A name for the operation (optional). Returns: A single-element tensor containing the number of incomplete elements in the given barrier.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", + "ast_data": "FunctionDef name:incomplete_size arg:self arg:name arguments arg arg If Compare Assign Return return:yes Call" + }, + { + "library": "sphinx", + "name": "InfoFilter", + "source_code": "class InfoFilter(logging.Filter):\n\n def filter(self, record: logging.LogRecord) -> bool:\n return record.levelno < logging.WARNING", + "docstring": "Filter error and warning messages.", + "type": "class", + "file_path": "sphinx\\sphinx\\util\\logging.py", + "ast_data": "ClassDef name:InfoFilter FunctionDef name:filter arg:self arg:record arguments arg arg Return return:yes Compare" + }, + { + "library": "matplotlib", + "name": "trigger_tool", + "source_code": "def trigger_tool(self, name, sender=None, canvasevent=None, data=None):\n tool = self.get_tool(name)\n if tool is None:\n return\n if sender is None:\n sender = self\n if isinstance(tool, backend_tools.ToolToggleBase):\n self._handle_toggle(tool, canvasevent, data)\n tool.trigger(sender, canvasevent, data)\n s = 'tool_trigger_%s' % name\n event = ToolTriggerEvent(s, sender, tool, canvasevent, data)\n self._callbacks.process(s, event)", + "docstring": "Trigger a tool and emit the `` event. Parameters ---------- name : str Name of the tool. sender : object Object that wishes to trigger the tool. canvasevent : Event Original Canvas event or None. 
data : object Extra data to pass to the tool when triggering.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py", + "ast_data": "FunctionDef name:trigger_tool arg:self arg:name arg:sender arg:canvasevent arg:data arguments arg arg arg arg arg Assign Call If Compare Return return:no If Compare Assign If Call Call Call Assign Assign Call Call" + }, + { + "library": "scipy", + "name": "splint", + "source_code": "def splint(a, b, tck, full_output=0):\n if isinstance(tck, BSpline):\n if tck.c.ndim > 1:\n mesg = 'Calling splint() with BSpline objects with c.ndim > 1 is not allowed. Use BSpline.integrate() instead.'\n raise ValueError(mesg)\n if full_output != 0:\n mesg = f'full_output = {full_output} is not supported. Proceeding as if full_output = 0'\n return tck.integrate(a, b, extrapolate=False)\n else:\n return _impl.splint(a, b, tck, full_output)", + "docstring": "Evaluate the definite integral of a B-spline between two given points. .. legacy:: function Specifically, we recommend constructing a object and using its `splevfull_outputsplintabBSplinein the tutorial `.", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_fitpack_py.py", + "ast_data": "FunctionDef name:splint arg:a arg:b arg:tck arg:full_output arguments arg arg arg arg If Call If Compare Assign Raise Call If Compare Assign Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_update_detector_quantizaiton_qconfig_info", + "source_code": "def _update_detector_quantizaiton_qconfig_info(self, combined_info: DetectorQConfigInfo, new_info: DetectorQConfigInfo):\n combined_info.is_activation_dynamic = combined_info.is_activation_dynamic or new_info.is_activation_dynamic\n combined_info.is_weight_per_channel = combined_info.is_weight_per_channel or new_info.is_weight_per_channel", + "docstring": "Takes in the old and new information and updates the combined information. 
Args: combined_info (DetectorQConfigInfo): The DetectorQConfigInfo we are compiling all of the information in new_info (DetectorQConfigInfo): The DetectorQConfigInfo with the information we are trying to merge the new info into it", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py", + "ast_data": "FunctionDef name:_update_detector_quantizaiton_qconfig_info arg:self arg:combined_info arg:new_info arguments arg arg arg Assign BoolOp Assign BoolOp" + }, + { + "library": "tensorflow", + "name": "DatasetV2", + "source_code": "@tf_export('__internal__.types.data.Dataset', v1=[])\nclass DatasetV2(abc.ABC):\n pass", + "docstring": "Represents the TensorFlow 2 type .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\types\\data.py", + "ast_data": "ClassDef name:DatasetV2 Call" + }, + { + "library": "django", + "name": "geom_output", + "source_code": "def geom_output(func, argtypes, offset=None):\n func.argtypes = argtypes\n if not offset:\n func.restype = c_void_p\n func.errcheck = check_geom\n else:\n func.restype = c_int\n\n def geomerrcheck(result, func, cargs):\n return check_geom_offset(result, func, cargs, offset)\n func.errcheck = geomerrcheck\n return func", + "docstring": "Generate a function that returns a Geometry either by reference or directly (if the return_geom keyword is set to True).", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py", + "ast_data": "FunctionDef name:geom_output arg:func arg:argtypes arg:offset arguments arg arg arg Assign If Assign Assign Assign FunctionDef name:geomerrcheck arg:result arg:func arg:cargs arguments arg arg arg Return return:yes Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n return self.key", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "rotate", + "source_code": "def rotate(tensor: Tensor, angle: Tensor, center: Union[None, Tensor]=None, mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=True) -> Tensor:\n if not isinstance(tensor, Tensor):\n raise TypeError(f'Input tensor type is not a Tensor. Got {type(tensor)}')\n if not isinstance(angle, Tensor):\n raise TypeError(f'Input angle type is not a Tensor. Got {type(angle)}')\n if center is not None and (not isinstance(center, Tensor)):\n raise TypeError(f'Input center type is not a Tensor. Got {type(center)}')\n if len(tensor.shape) not in (3, 4):\n raise ValueError(f'Invalid tensor shape, we expect CxHxW or BxCxHxW. Got: {tensor.shape}')\n if center is None:\n center = _compute_tensor_center(tensor)\n angle = angle.expand(tensor.shape[0])\n center = center.expand(tensor.shape[0], -1)\n rotation_matrix: Tensor = _compute_rotation_matrix(angle, center)\n return affine(tensor, rotation_matrix[..., :2, :3], mode, padding_mode, align_corners)", + "docstring": "Rotate the tensor anti-clockwise about the center. .. image:: _static/img/rotate.png Args: tensor: The image tensor to be warped in shapes of :math:. angle: The angle through which to rotate. The tensor must have a shape of (B), where B is batch size. center: The center through which to rotate. The tensor must have a shape of (B, 2), where B is batch size and last dimension contains cx and cy. 
mode: interpolation mode to calculate output values `here `__. Example: >>> img = torch.rand(1, 3, 4, 4) >>> angle = torch.tensor([90.]) >>> out = rotate(img, angle) >>> print(out.shape) torch.Size([1, 3, 4, 4])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py", + "ast_data": "FunctionDef name:rotate arg:tensor arg:angle arg:center arg:mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg arg If Call Raise Call Call If Call Raise Call Call If BoolOp Compare Call Raise Call Call If Compare Call Raise Call If Compare Assign Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "cross_hashed", + "source_code": "@tf_export('ragged.cross_hashed')\n@dispatch.add_dispatch_support\ndef cross_hashed(inputs, num_buckets=0, hash_key=None, name=None):\n return _cross_internal(inputs=inputs, hashed_output=True, num_buckets=num_buckets, hash_key=hash_key, name=name)", + "docstring": "Generates hashed feature cross from a list of tensors. The input tensors must have , and must all have the same number of rows. The result is a with the same number of rows as the inputs, where contains a list of all combinations of values formed by taking a single value from each input's corresponding row (). Values are combined by hashing together their fingerprints. E.g.: >>> tf.ragged.cross_hashed([tf.ragged.constant([['a'], ['b', 'c']]), ... tf.ragged.constant([['d'], ['e']]), ... tf.ragged.constant([['f'], ['g']])], ... num_buckets=100) Args: inputs: A list of or or . num_buckets: A non-negative that used to bucket the hashed values. If , then . hash_key: Integer hash_key that will be used by the function. If not given, a default key is used. name: Optional name for the op. 
Returns: A 2D of type .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py", + "ast_data": "FunctionDef name:cross_hashed arg:inputs arg:num_buckets arg:hash_key arg:name arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "center", + "source_code": "@property\ndef center(self):\n x0, y0, width, height = self._rect_bbox\n return (x0 + width / 2.0, y0 + height / 2.0)", + "docstring": "Center of rectangle in data coordinates.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:center arg:self arguments arg Assign Return return:yes" + }, + { + "library": "scipy", + "name": "num", + "source_code": "@property\ndef num(self):\n return self._num", + "docstring": "Numerator of the system.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:num arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_transform_linear_with_packedparam", + "source_code": "def _transform_linear_with_packedparam(gm: torch.fx.GraphModule, node: torch.fx.Node):\n scale_node, zero_point_node = (node.args[2], node.args[3])\n inp_node, param_node = (node.args[0], node.args[1])\n assert isinstance(inp_node, torch.fx.Node)\n assert isinstance(param_node, torch.fx.Node)\n if param_node.op == 'call_function':\n w_node, b_node = (param_node.args[0], param_node.args[1])\n assert isinstance(w_node, torch.fx.Node)\n assert b_node is None or isinstance(b_node, torch.fx.Node)\n param_0, param_1 = insert_weight_and_bias_get_attr_node_from_get_attr_to_qtensor(gm, w_node, b_node)\n op_res_node = gm.graph.call_function(torch.ops.aten.linear, (inp_node, param_0, param_1, *param_node.args[2:]))\n else:\n param_0, param_1 = insert_weight_and_bias_get_attr_node_from_get_attr_to_scriptobject(gm, param_node)\n op_res_node = gm.graph.call_function(torch.ops.aten.linear, (inp_node, param_0, param_1))\n return (op_res_node, scale_node, zero_point_node)", + "docstring": "Linear specfic transformation function.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\passes\\replace_quantized_ops_with_standard_ops_pass.py", + "ast_data": "FunctionDef name:_transform_linear_with_packedparam arg:gm arg:node arguments arg arg Assign Assign Call Call If Compare Assign Call BoolOp Compare Call Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "render_to_response", + "source_code": "def render_to_response(self, context, **response_kwargs):\n response_kwargs.setdefault('content_type', self.content_type)\n return self.response_class(request=self.request, template=self.get_template_names(), context=context, using=self.template_engine, **response_kwargs)", + "docstring": "Return a response, using the for this view, with a template rendered with the given context. 
Pass response_kwargs to the constructor of the response class.", + "type": "method", + "file_path": "django\\django\\views\\generic\\base.py", + "ast_data": "FunctionDef name:render_to_response arg:self arg:context arguments arg arg arg Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "get_previous_year", + "source_code": "def get_previous_year(self, date):\n return _get_next_prev(self, date, is_previous=True, period='year')", + "docstring": "Get the previous valid year.", + "type": "method", + "file_path": "django\\django\\views\\generic\\dates.py", + "ast_data": "FunctionDef name:get_previous_year arg:self arg:date arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_PythonStringStateSaveable", + "source_code": "class _PythonStringStateSaveable(saveable_object.SaveableObject):\n\n def __init__(self, name, state_callback, restore_callback):\n\n def _state_callback_wrapper():\n with ops.init_scope():\n return state_callback()\n self._state_callback = _state_callback_wrapper\n self._restore_callback = restore_callback\n with ops.device('/cpu:0'):\n self._save_string = constant_op.constant('', dtype=dtypes.string)\n spec = saveable_object.SaveSpec(self._save_string, '', name, dtype=dtypes.string)\n super(_PythonStringStateSaveable, self).__init__(self._save_string, [spec], name)\n\n def feed_dict_additions(self):\n return {self._save_string: self._state_callback()}\n\n def freeze(self):\n\n def _constant_state():\n return constant_op.constant(self._state_callback(), dtype=dtypes.string)\n return trackable.NoRestoreSaveable(tensor=_constant_state, dtype=dtypes.string, name=self.name, device='cpu:0')", + "docstring": "Saves Python state in a checkpoint.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", + "ast_data": "ClassDef name:_PythonStringStateSaveable FunctionDef name:__init__ arg:self arg:name arg:state_callback arg:restore_callback arguments arg arg arg arg FunctionDef name:_state_callback_wrapper arguments With Call Return return:yes Call Assign Assign With Call Assign Call Assign Call Call Call FunctionDef name:feed_dict_additions arg:self arguments arg Return return:yes Call FunctionDef name:freeze arg:self arguments arg FunctionDef name:_constant_state arguments Return return:yes Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_apply_combiner_to_embeddings", + "source_code": "def _apply_combiner_to_embeddings(self, embeddings: tensor.Tensor, weight: tensor.Tensor, combiner: Optional[Text]=None) -> tensor.Tensor:\n if combiner is None:\n combiner = 'mean'\n if combiner == 'sum':\n embeddings = math_ops.reduce_sum(embeddings, axis=-2)\n elif combiner == 'mean':\n embeddings = math_ops.reduce_sum(embeddings, axis=-2)\n weight_sum = math_ops.reduce_sum(weight, axis=-2)\n embeddings = math_ops.div_no_nan(embeddings, weight_sum)\n elif combiner == 'sqrtn':\n embeddings = math_ops.reduce_sum(embeddings, axis=-2)\n weight_squared = math_ops.pow(weight, 2)\n weight_sum = math_ops.reduce_sum(weight_squared, axis=-2)\n weight_sum_sqrt = math_ops.sqrt(weight_sum)\n embeddings = math_ops.div_no_nan(embeddings, weight_sum_sqrt)\n else:\n raise ValueError(f\"combiner must be one of 'mean', 'sqrtn' or 'sum', got {combiner}\")\n return embeddings", + "docstring": "Apply the combiner to the embedding look up result on second to last axis. Args: embeddings: A Tensor of the embedding lookup result. 
weight: A Tensor of weight which has the same shape of the embeddings. combiner: One of \"mean\", \"sum\", \"sqrtn\". Defaults to \"mean\". Raises: ValueError: If the combiner is not one of 'mean', 'sqrtn' or 'sum'. Returns: A Tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v1.py", + "ast_data": "FunctionDef name:_apply_combiner_to_embeddings arg:self arg:embeddings arg:weight arg:combiner arguments arg arg arg arg If Compare Assign If Compare Assign Call If Compare Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Assign Call Raise Call Return return:yes" + }, + { + "library": "sphinx", + "name": "CitationDefinitionTransform", + "source_code": "class CitationDefinitionTransform(SphinxTransform):\n default_priority = 619\n\n def apply(self, **kwargs: Any) -> None:\n domain = self.env.domains.citation_domain\n for node in self.document.findall(nodes.citation):\n node['docname'] = self.env.docname\n domain.note_citation(node)\n label = cast('nodes.label', node[0])\n label['support_smartquotes'] = False", + "docstring": "Mark citation definition labels as not smartquoted.", + "type": "class", + "file_path": "sphinx\\sphinx\\domains\\citation.py", + "ast_data": "ClassDef name:CitationDefinitionTransform Assign FunctionDef name:apply arg:self arguments arg arg Assign For Call Assign Call Assign Call Assign" + }, + { + "library": "pytorch", + "name": "Access", + "source_code": "@dataclass\nclass Access:\n type: AccessType\n seq_num: SeqNum\n stream: StreamId\n operator: str\n aliases: list[str]\n is_output: bool\n stack_trace: traceback.StackSummary", + "docstring": "Stores information about a single access to a tensor by a kernel. Args: type: either AccessType.READ or AccessType.Write. seq_num: the sequential number of the kernel performing the access. stream: the stream id of the stream executing the kernel. operator: the schema of the launched kernel, which lists the arguments and return type. aliases: the arguments in the schema this access corresponds to. is_output: Whether the tensor was an output of the kernel. 
stack_trace: the stack summary object captured during access.", + "type": "class", + "file_path": "pytorch\\torch\\cuda\\_sanitizer.py", + "ast_data": "ClassDef name:Access" + }, + { + "library": "scipy", + "name": "is_pydata_spmatrix", + "source_code": "def is_pydata_spmatrix(m) -> bool:\n base_cls = getattr(sys.modules.get('sparse'), 'SparseArray', None)\n return base_cls is not None and isinstance(m, base_cls)", + "docstring": "Check whether object is pydata/sparse matrix, avoiding importing the module.", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_sputils.py", + "ast_data": "FunctionDef name:is_pydata_spmatrix arg:m arguments arg Assign Call Call Return return:yes BoolOp Compare Call" + }, + { + "library": "tensorflow", + "name": "get_or_create_variables_dir", + "source_code": "def get_or_create_variables_dir(export_dir):\n variables_dir = get_variables_dir(export_dir)\n file_io.recursive_create_dir(variables_dir)\n return variables_dir", + "docstring": "Return variables sub-directory, or create one if it doesn't exist.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\path_helpers.py", + "ast_data": "FunctionDef name:get_or_create_variables_dir arg:export_dir arguments arg Assign Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "reindex", + "source_code": "def reindex(self, target, method=None, level=None, limit: int | None=None, tolerance=None) -> tuple[Index, npt.NDArray[np.intp] | None]:\n if method is not None:\n raise NotImplementedError('argument method is not implemented for CategoricalIndex.reindex')\n if level is not None:\n raise NotImplementedError('argument level is not implemented for CategoricalIndex.reindex')\n if limit is not None:\n raise NotImplementedError('argument limit is not implemented for CategoricalIndex.reindex')\n return super().reindex(target)", + "docstring": "Create index with target's values (move/add/delete values as necessary) Returns ------- new_index : pd.Index Resulting index indexer : np.ndarray[np.intp] or None Indices of output values in original index", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\category.py", + "ast_data": "FunctionDef name:reindex arg:self arg:target arg:method arg:level arg:limit arg:tolerance arguments arg arg arg arg arg arg If Compare Raise Call If Compare Raise Call If Compare Raise Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_process_dataframe", + "source_code": "@final\ndef _process_dataframe(self) -> dict[int | str, dict[str, Any]]:\n df = self.frame\n if self.index:\n df = df.reset_index()\n if self.na_rep is not None:\n df = df.fillna(self.na_rep)\n return df.to_dict(orient='index')", + "docstring": "Adjust Data Frame to fit xml output. 
This method will adjust underlying data frame for xml output, including optionally replacing missing values and including indexes.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\xml.py", + "ast_data": "FunctionDef name:_process_dataframe arg:self arguments arg Assign If Assign Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "sphinx", + "name": "_toctree_add_classes", + "source_code": "def _toctree_add_classes(node: Element, depth: int, docname: str) -> None:\n for subnode in node.children:\n if isinstance(subnode, addnodes.compact_paragraph | nodes.list_item):\n subnode['classes'].append(f'toctree-l{depth - 1}')\n _toctree_add_classes(subnode, depth, docname)\n elif isinstance(subnode, nodes.bullet_list):\n _toctree_add_classes(subnode, depth + 1, docname)\n elif isinstance(subnode, nodes.reference):\n if subnode['refuri'] == docname:\n if not subnode['anchorname']:\n branchnode: Element = subnode\n while branchnode:\n branchnode['classes'].append('current')\n branchnode = branchnode.parent\n if subnode.parent.parent.get('iscurrent'):\n return\n while subnode:\n subnode['iscurrent'] = True\n subnode = subnode.parent", + "docstring": "Add 'toctree-l%d' and 'current' classes to the toctree.", + "type": "function", + "file_path": "sphinx\\sphinx\\environment\\adapters\\toctree.py", + "ast_data": "FunctionDef name:_toctree_add_classes arg:node arg:depth arg:docname arguments arg arg arg For If Call Call Call If Call Call If Call If Compare If While Call Assign If Call Return return:no While Assign Assign" + }, + { + "library": "pytorch", + "name": "_get_forward_arg_names", + "source_code": "def _get_forward_arg_names(mod: torch.nn.Module, args: tuple[Any, ...], kwargs: Optional[dict[str, Any]]=None) -> list[str]:\n sig = inspect.signature(mod.forward)\n _args = sig.bind_partial(*args).arguments\n names: list[str] = []\n for name, value in _args.items():\n if sig.parameters[name].kind == inspect._ParameterKind.VAR_POSITIONAL:\n names.extend([f'{name}_{i}' for i, _ in enumerate(value)])\n else:\n names.append(name)\n if kwargs:\n names.extend([kwarg for kwarg, _ in kwargs.items()])\n return names", + "docstring": "Gets the argument names to forward that are used, for restoring the original signature when unlifting the exported program module. - Positional args: retain the original argument names, and enumerate *args as args_0, args_1, ... - Keyword args: retain the original kwarg names in the order specified by the user. This order seems to matter for the current state of export lifted modules.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_trace.py", + "ast_data": "FunctionDef name:_get_forward_arg_names arg:mod arg:args arg:kwargs arguments arg arg arg Assign Call Assign Call For Call If Compare Call Call Call If Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "emit", + "source_code": "def emit(self, event: str, *args: Any, allowed_exceptions: tuple[type[Exception], ...]=()) -> list[Any]:\n return self.events.emit(event, *args, allowed_exceptions=allowed_exceptions)", + "docstring": "Emit *event* and pass *arguments* to the callback functions. Return the return values of all callbacks as a list. Do not emit core Sphinx events in extensions! :param event: The name of event that will be emitted :param args: The arguments for the event :param allowed_exceptions: The list of exceptions that are allowed in the callbacks .. 
versionchanged:: 3.1 Added *allowed_exceptions* to specify path-through exceptions", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:emit arg:self arg:event arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "essential_from_Rt", + "source_code": "def essential_from_Rt(R1: torch.Tensor, t1: torch.Tensor, R2: torch.Tensor, t2: torch.Tensor) -> torch.Tensor:\n KORNIA_CHECK_SHAPE(R1, ['*', '3', '3'])\n KORNIA_CHECK_SHAPE(R2, ['*', '3', '3'])\n KORNIA_CHECK_SHAPE(t1, ['*', '3', '1'])\n KORNIA_CHECK_SHAPE(t2, ['*', '3', '1'])\n R, t = relative_camera_motion(R1, t1, R2, t2)\n Tx = cross_product_matrix(t[..., 0])\n return Tx @ R", + "docstring": "Get the Essential matrix from Camera motion (Rs and ts). Reference: Hartley/Zisserman 9.6 pag 257 (formula 9.12) Args: R1: The first camera rotation matrix with shape :math:. t1: The first camera translation vector with shape :math:. R2: The second camera rotation matrix with shape :math:. t2: The second camera translation vector with shape :math:. Returns: The Essential matrix with the shape :math:.", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\epipolar\\essential.py", + "ast_data": "FunctionDef name:essential_from_Rt arg:R1 arg:t1 arg:R2 arg:t2 arguments arg arg arg arg Call Call Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scrapy", + "name": "CSVFeedSpider", + "source_code": "class CSVFeedSpider(Spider):\n delimiter: str | None = None\n quotechar: str | None = None\n headers: list[str] | None = None\n\n def process_results(self, response: Response, results: Iterable[Any]) -> Iterable[Any]:\n return results\n\n def adapt_response(self, response: Response) -> Response:\n return response\n\n def parse_row(self, response: Response, row: dict[str, str]) -> Any:\n raise NotImplementedError\n\n def parse_rows(self, response: Response) -> Any:\n for row in csviter(response, self.delimiter, self.headers, quotechar=self.quotechar):\n ret = iterate_spider_output(self.parse_row(response, row))\n yield from self.process_results(response, ret)\n\n def _parse(self, response: Response, **kwargs: Any) -> Any:\n if not hasattr(self, 'parse_row'):\n raise NotConfigured('You must define parse_row method in order to scrape this CSV feed')\n response = self.adapt_response(response)\n return self.parse_rows(response)", + "docstring": "Spider for parsing CSV feeds. It receives a CSV file in a response; iterates through each of its rows, and calls parse_row with a dict containing each field's data. 
You can set some options regarding the CSV file, such as the delimiter, quotechar and the file's headers.", + "type": "class", + "file_path": "scrapy\\scrapy\\spiders\\feed.py", + "ast_data": "ClassDef name:CSVFeedSpider FunctionDef name:process_results arg:self arg:response arg:results arguments arg arg arg Return return:yes FunctionDef name:adapt_response arg:self arg:response arguments arg arg Return return:yes FunctionDef name:parse_row arg:self arg:response arg:row arguments arg arg arg Raise FunctionDef name:parse_rows arg:self arg:response arguments arg arg For Call Assign Call Call Call FunctionDef name:_parse arg:self arg:response arguments arg arg arg If Call Raise Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "add_session_log", + "source_code": "def add_session_log(self, session_log, global_step=None):\n event = event_pb2.Event(session_log=session_log)\n self._add_event(event, global_step)", + "docstring": "Adds a protocol buffer to the event file. This method wraps the provided session in an protocol buffer and adds it to the event file. Args: session_log: A protocol buffer. global_step: Number. Optional global step value to record with the summary.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py", + "ast_data": "FunctionDef name:add_session_log arg:self arg:session_log arg:global_step arguments arg arg arg Assign Call Call" + }, + { + "library": "sphinx", + "name": "glossary", + "source_code": "class glossary(nodes.Element):\n pass", + "docstring": "Node to insert a glossary.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:glossary" + }, + { + "library": "django", + "name": "_listarr", + "source_code": "def _listarr(self, func):\n return [func(self.ptr, i) for i in range(len(self))]", + "docstring": "Internal routine that returns a sequence (list) corresponding with the given function.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:_listarr arg:self arg:func arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, profile_datum):\n self.total_op_time = profile_datum.op_time\n self.total_exec_time = profile_datum.exec_time\n device_and_node = '%s:%s' % (profile_datum.device_name, profile_datum.node_exec_stats.node_name)\n self._node_to_exec_count = {device_and_node: 1}", + "docstring": "Constructor. 
Args: profile_datum: () an instance of to initialize this object with.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\profiling.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:profile_datum arguments arg arg Assign Assign Assign Assign" + }, + { + "library": "django", + "name": "iterative_dfs", + "source_code": "def iterative_dfs(self, start, forwards=True):\n visited = []\n visited_set = set()\n stack = [(start, False)]\n while stack:\n node, processed = stack.pop()\n if node in visited_set:\n pass\n elif processed:\n visited_set.add(node)\n visited.append(node.key)\n else:\n stack.append((node, True))\n stack += [(n, False) for n in sorted(node.parents if forwards else node.children)]\n return visited", + "docstring": "Iterative depth-first search for finding dependencies.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\graph.py", + "ast_data": "FunctionDef name:iterative_dfs arg:self arg:start arg:forwards arguments arg arg arg Assign Assign Call Assign While Assign Call If Compare If Call Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "overlaps", + "source_code": "def overlaps(self, other):\n return capi.geos_overlaps(self.ptr, other.ptr)", + "docstring": "Return true if the DE-9IM intersection matrix for the two Geometries is T*T***T** (for two points or two surfaces) 1*T***T** (for two curves).", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:overlaps arg:self arg:other arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_cuda_device_context", + "source_code": "def get_cuda_device_context(gm: torch.fx.GraphModule) -> AbstractContextManager[None]:\n if not torch.cuda.is_available():\n return contextlib.nullcontext()\n cuda_devices: OrderedSet[torch.device] = OrderedSet((device for device in get_all_devices(gm) if device.type == 'cuda'))\n return torch.cuda.device(next(iter(cuda_devices))) if len(cuda_devices) == 1 else contextlib.nullcontext()", + "docstring": "Returns a cuda device context manager if there is a single device in the graph", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\compile_fx.py", + "ast_data": "FunctionDef name:get_cuda_device_context arg:gm arguments arg If Call Return return:yes Call Call Call Compare Return return:yes Compare Call Call Call Call Call" + }, + { + "library": "scipy", + "name": "__new__", + "source_code": "def __new__(cls, *system):\n if cls is lti:\n N = len(system)\n if N == 2:\n return TransferFunctionContinuous.__new__(TransferFunctionContinuous, *system)\n elif N == 3:\n return ZerosPolesGainContinuous.__new__(ZerosPolesGainContinuous, *system)\n elif N == 4:\n return StateSpaceContinuous.__new__(StateSpaceContinuous, *system)\n else:\n raise ValueError('`system` needs to be an instance of `lti` or have 2, 3 or 4 arguments.')\n return super().__new__(cls)", + "docstring": "Create an instance of the appropriate subclass.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:__new__ arg:cls arguments arg arg If Compare Assign Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_dir_additions_for_owner", + "source_code": "@final\n@cache_readonly\ndef _dir_additions_for_owner(self) -> set[str_t]:\n return {c for c in 
self.unique(level=0)[:get_option('display.max_dir_items')] if isinstance(c, str) and c.isidentifier()}", + "docstring": "Add the string-like labels to the owner dataframe/series dir output. If this is a MultiIndex, it's first level values are used.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_dir_additions_for_owner arg:self arguments arg Return return:yes Call Call BoolOp Call Call" + }, + { + "library": "scikit-learn", + "name": "TweedieRegressor", + "source_code": "class TweedieRegressor(_GeneralizedLinearRegressor):\n _parameter_constraints: dict = {**_GeneralizedLinearRegressor._parameter_constraints, 'power': [Interval(Real, None, None, closed='neither')], 'link': [StrOptions({'auto', 'identity', 'log'})]}\n\n def __init__(self, *, power=0.0, alpha=1.0, fit_intercept=True, link='auto', solver='lbfgs', max_iter=100, tol=0.0001, warm_start=False, verbose=0):\n super().__init__(alpha=alpha, fit_intercept=fit_intercept, solver=solver, max_iter=max_iter, tol=tol, warm_start=warm_start, verbose=verbose)\n self.link = link\n self.power = power\n\n def _get_loss(self):\n if self.link == 'auto':\n if self.power <= 0:\n return HalfTweedieLossIdentity(power=self.power)\n else:\n return HalfTweedieLoss(power=self.power)\n if self.link == 'log':\n return HalfTweedieLoss(power=self.power)\n if self.link == 'identity':\n return HalfTweedieLossIdentity(power=self.power)", + "docstring": "Generalized Linear Model with a Tweedie distribution. This estimator can be used to model different GLMs depending on the `User Guide n_samplesn_featuresn_features[1, inf)`max{|g_j|, j = 1, ..., d} >> from sklearn import linear_model >>> clf = linear_model.TweedieRegressor() >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]] >>> y = [2, 3.5, 5, 5.5] >>> clf.fit(X, y) TweedieRegressor() >>> clf.score(X, y) np.float64(0.839) >>> clf.coef_ array([0.599, 0.299]) >>> clf.intercept_ np.float64(1.600) >>> clf.predict([[1, 1], [3, 4]]) array([2.500, 4.599])", + "type": "class", + "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\glm.py", + "ast_data": "ClassDef name:TweedieRegressor Call Call FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg arg arg arg Call Call Assign Assign FunctionDef name:_get_loss arg:self arguments arg If Compare If Compare Return return:yes Call Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call" + }, + { + "library": "numpy", + "name": "mypy", + "source_code": "@click.command(context_settings={'ignore_unknown_options': True})\n@click.pass_context\ndef mypy(ctx):\n env = os.environ\n env['NPY_RUN_MYPY_IN_TESTSUITE'] = '1'\n ctx.params['pytest_args'] = [os.path.join('numpy', 'typing')]\n ctx.params['markexpr'] = 'full'\n ctx.forward(test)", + "docstring": "🦆 Run Mypy tests for NumPy", + "type": "function", + "file_path": "numpy\\.spin\\cmds.py", + "ast_data": "FunctionDef name:mypy arg:ctx arguments arg Assign Assign Assign Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "cluster_spec", + "source_code": "def cluster_spec(self):\n request_body = {'instanceState': 'RUNNING'}\n request = self._service.instanceGroups().listInstances(project=self._project, zone=self._zone, instanceGroups=self._instance_group, body=request_body, orderBy='name')\n worker_list = []\n while request is not None:\n response = request.execute()\n items = response['items']\n for instance in items:\n instance_name = instance['instance'].split('/')[-1]\n instance_request = 
self._service.instances().get(project=self._project, zone=self._zone, instance=instance_name)\n if instance_request is not None:\n instance_details = instance_request.execute()\n ip_address = instance_details['networkInterfaces'][0]['networkIP']\n instance_url = '%s:%s' % (ip_address, self._port)\n worker_list.append(instance_url)\n request = self._service.instanceGroups().listInstances_next(previous_request=request, previous_response=response)\n worker_list.sort()\n return ClusterSpec({self._task_type: worker_list})", + "docstring": "Returns a ClusterSpec object based on the latest instance group info. This returns a ClusterSpec object for use based on information from the specified instance group. We will retrieve the information from the GCE APIs every time this method is called. Returns: A ClusterSpec containing host information retrieved from GCE.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\gce_cluster_resolver.py", + "ast_data": "FunctionDef name:cluster_spec arg:self arguments arg Assign Assign Call Call Assign While Compare Assign Call Assign For Assign Call Assign Call Call If Compare Assign Call Assign Assign Call Assign Call Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "contourf", + "source_code": "@_preprocess_data()\n@_docstring.interpd\ndef contourf(self, *args, **kwargs):\n kwargs['filled'] = True\n contours = mcontour.QuadContourSet(self, *args, **kwargs)\n self._request_autoscale_view()\n return contours", + "docstring": "Plot filled contours. Call signature:: contourf([X, Y,] Z, /, [levels], **kwargs) The arguments *X*, *Y*, *Z* are positional-only. %(contour_doc)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", + "ast_data": "FunctionDef name:contourf arg:self arguments arg arg arg Assign Assign Call Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_reverse_related_filter", + "source_code": "def get_reverse_related_filter(self, obj):\n base_q = Q.create([(rh_field.attname, getattr(obj, lh_field.attname)) for lh_field, rh_field in self.related_fields])\n descriptor_filter = self.get_extra_descriptor_filter(obj)\n if isinstance(descriptor_filter, dict):\n return base_q & Q(**descriptor_filter)\n elif descriptor_filter:\n return base_q & descriptor_filter\n return base_q", + "docstring": "Complement to get_forward_related_filter(). Return the keyword arguments that when passed to self.related_field.model.object.filter() select all instances of self.related_field.model related through this field to obj. 
obj is an instance of self.model.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\related.py", + "ast_data": "FunctionDef name:get_reverse_related_filter arg:self arg:obj arguments arg arg Assign Call Call Assign Call If Call Return return:yes Call If Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "Sort", + "source_code": "class Sort(Benchmark):\n params = [['quick', 'merge', 'heap'], ['float64', 'int64', 'float32', 'uint32', 'int32', 'int16', 'float16'], [('random',), ('ordered',), ('reversed',), ('uniform',), ('sorted_block', 10), ('sorted_block', 100), ('sorted_block', 1000)]]\n param_names = ['kind', 'dtype', 'array_type']\n ARRAY_SIZE = 10000\n\n def setup(self, kind, dtype, array_type):\n rnd = np.random.RandomState(507582308)\n array_class = array_type[0]\n self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:], rnd)\n\n def time_sort(self, kind, dtype, array_type):\n np.sort(self.arr, kind=kind)\n\n def time_argsort(self, kind, dtype, array_type):\n np.argsort(self.arr, kind=kind)", + "docstring": "This benchmark tests sorting performance with several different types of arrays that are likely to appear in real-world applications.", + "type": "class", + "file_path": "numpy\\benchmarks\\benchmarks\\bench_function_base.py", + "ast_data": "ClassDef name:Sort Assign Assign Assign FunctionDef name:setup arg:self arg:kind arg:dtype arg:array_type arguments arg arg arg arg Assign Call Assign Assign Call Call FunctionDef name:time_sort arg:self arg:kind arg:dtype arg:array_type arguments arg arg arg arg Call FunctionDef name:time_argsort arg:self arg:kind arg:dtype arg:array_type arguments arg arg arg arg Call" + }, + { + "library": "scrapy", + "name": "connect", + "source_code": "def connect(self, receiver: Any, signal: Any, **kwargs: Any) -> None:\n kwargs.setdefault('sender', self.sender)\n dispatcher.connect(receiver, signal, **kwargs)", + "docstring": "Connect a receiver function to a signal. The signal can be any object, although Scrapy comes with some predefined signals that are documented in the :ref: section. :param receiver: the function to be connected :type receiver: collections.abc.Callable :param signal: the signal to connect to :type signal: object", + "type": "method", + "file_path": "scrapy\\scrapy\\signalmanager.py", + "ast_data": "FunctionDef name:connect arg:self arg:receiver arg:signal arguments arg arg arg arg Call Call" + }, + { + "library": "matplotlib", + "name": "get_tick_space", + "source_code": "def get_tick_space(self):\n raise NotImplementedError()", + "docstring": "Return the estimated number of ticks that can fit on the axis.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:get_tick_space arg:self arguments arg Raise Call" + }, + { + "library": "tensorflow", + "name": "nrows", + "source_code": "def nrows(self):\n if self.rank == 0:\n return None\n return self._ragged_shape[0]", + "docstring": "The number of rows in this StructuredTensor (if rank>0). This means the length of the outer-most dimension of the StructuredTensor. Notice that if , then this equals the number of rows of the first row partition. That is, . Otherwise will be the first dimension of the field values. 
Returns: A scalar integer (or if ).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py", + "ast_data": "FunctionDef name:nrows arg:self arguments arg If Compare Return return:no Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, fig, *args, horizontal=None, vertical=None, aspect=None, anchor='C'):\n self.figure = fig\n super().__init__(fig, [0, 0, 1, 1], horizontal=horizontal or [], vertical=vertical or [], aspect=aspect, anchor=anchor)\n self.set_subplotspec(SubplotSpec._from_subplot_args(fig, args))", + "docstring": "Parameters ---------- fig : *args : tuple (*nrows*, *ncols*, *index*) or int The array of subplots in the figure has dimensions `~mpl_toolkits.axes_grid1.axes_size~mpl_toolkits.axes_grid1.axes_size`, optional Sizes for vertical division. aspect : bool, optional Whether overall rectangular area is reduced so that the relative part of the horizontal and vertical scales have the same scale. anchor : (float, float) or {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W'}, default: 'C' Placement of the reduced rectangle, when *aspect* is True.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:fig arguments arg arg arg arg arg arg arg Assign Call Call BoolOp BoolOp Call Call" + }, + { + "library": "tensorflow", + "name": "_set_converter_options_for_float", + "source_code": "def _set_converter_options_for_float(self, converter: TFLiteConverter) -> TFLiteConverter:\n if converter.optimizations:\n converter.optimizations = []\n return converter", + "docstring": "Verify converter options and set required experimental options.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\debugging\\python\\debugger.py", + "ast_data": "FunctionDef name:_set_converter_options_for_float arg:self arg:converter arguments arg arg If Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, key_dtype, value_dtype):\n self._key_dtype = dtypes.as_dtype(key_dtype)\n self._value_dtype = dtypes.as_dtype(value_dtype)", + "docstring": "Construct a table initializer object. Args: key_dtype: Type of the table keys. value_dtype: Type of the table values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:key_dtype arg:value_dtype arguments arg arg arg Assign Call Assign Call" + }, + { + "library": "tensorflow", + "name": "rgb_to_yuv", + "source_code": "@tf_export('image.rgb_to_yuv')\n@dispatch.add_dispatch_support\ndef rgb_to_yuv(images):\n images = ops.convert_to_tensor(images, name='images')\n kernel = ops.convert_to_tensor(_rgb_to_yuv_kernel, dtype=images.dtype, name='kernel')\n ndims = images.get_shape().ndims\n return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]])", + "docstring": "Converts one or more images from RGB to YUV. Outputs a tensor of the same shape as the tensor, containing the YUV value of the pixels. The output is only well defined if the value in images are in [0, 1]. There are two ways of representing an image: [0, 255] pixel values range or [0, 1] (as float) pixel values range. Users need to convert the input image into a float [0, 1] range. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. 
Returns: images: tensor with the same shape as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py", + "ast_data": "FunctionDef name:rgb_to_yuv arg:images arguments arg Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_min_matrix_dim_tensor", + "source_code": "def _min_matrix_dim_tensor(self):\n return math_ops.reduce_min(self.shape_tensor()[-2:])", + "docstring": "Minimum of domain/range dimension, as a tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_zeros.py", + "ast_data": "FunctionDef name:_min_matrix_dim_tensor arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "NonOwningLayout", + "source_code": "class NonOwningLayout(Layout):\n\n def __init__(self, view: Union[BaseView, TensorBox]) -> None:\n layout = view.get_layout()\n super().__init__(layout.device, layout.dtype, layout.size, layout.stride)\n self.view = view\n\n def make_indexer(self) -> Callable[[Sequence[Expr]], Expr]:\n return self.as_fixed().make_indexer()\n\n def maybe_guard_aligned(self):\n offset = self.view.get_layout().offset\n if offset == 0:\n return True\n from .utils import ALIGNMENT\n return V.graph.sizevars.statically_known_multiple_of(offset, ALIGNMENT)", + "docstring": "Is a view into the storage of another tensor", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "ClassDef name:NonOwningLayout FunctionDef name:__init__ arg:self arg:view arguments arg arg Assign Call Call Call Assign FunctionDef name:make_indexer arg:self arguments arg Return return:yes Call Call FunctionDef name:maybe_guard_aligned arg:self arguments arg Assign Call If Compare Return return:yes Return return:yes Call" + }, + { + "library": "numpy", + "name": "FuncNameSuffix", + "source_code": "class FuncNameSuffix:\n\n def __init__(self, suffix):\n self.suffix = suffix", + "docstring": "Stores the suffix to append when generating functions names.", + "type": "class", + "file_path": "numpy\\numpy\\_core\\code_generators\\generate_umath.py", + "ast_data": "ClassDef name:FuncNameSuffix FunctionDef name:__init__ arg:self arg:suffix arguments arg arg Assign" + }, + { + "library": "matplotlib", + "name": "get", + "source_code": "def get(self):\n return dict(self._params)", + "docstring": "Return copy of the parameters for the layout engine.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py", + "ast_data": "FunctionDef name:get arg:self arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "pbdv_seq", + "source_code": "def pbdv_seq(v, x):\n if not (isscalar(v) and isscalar(x)):\n raise ValueError('arguments must be scalars.')\n n = int(v)\n v0 = v - n\n if n < 1:\n n1 = 1\n else:\n n1 = n\n v1 = n1 + v0\n dv, dp, pdf, pdd = _specfun.pbdv(v1, x)\n return (dv[:n1 + 1], dp[:n1 + 1])", + "docstring": "Parabolic cylinder functions Dv(x) and derivatives. Parameters ---------- v : float Order of the parabolic cylinder function x : float Value at which to evaluate the function and derivatives Returns ------- dv : ndarray Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. dp : ndarray Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. 
\"Computation of Special Functions\", John Wiley and Sons, 1996, chapter 13.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:pbdv_seq arg:v arg:x arguments arg arg If BoolOp Call Call Raise Call Assign Call Assign If Compare Assign Assign Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_BesselI0eGrad", + "source_code": "@ops.RegisterGradient('BesselI0e')\ndef _BesselI0eGrad(op: ops.Operation, grad):\n x = op.inputs[0]\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n partial_x = special_math_ops.bessel_i1e(x) - math_ops.sign(x) * y\n return grad * partial_x", + "docstring": "Compute gradient of bessel_i0e(x) with respect to its argument.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_BesselI0eGrad arg:op arg:grad arguments arg arg Assign Assign With Call Assign Call Call Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "__call__", + "source_code": "def __call__(self, environ, start_response):\n return _TrappedResponse(self.nextapp, environ, start_response, self.throws)", + "docstring": "Handle exceptions while processing a WSGI request.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpwsgi.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:environ arg:start_response arguments arg arg arg Return return:yes Call" + }, + { + "library": "kornia", + "name": "get_sift_bin_ksize_stride_pad", + "source_code": "def get_sift_bin_ksize_stride_pad(patch_size: int, num_spatial_bins: int) -> Tuple[int, int, int]:\n ksize: int = 2 * int(patch_size / (num_spatial_bins + 1))\n stride: int = patch_size // num_spatial_bins\n pad: int = ksize // 4\n out_size: int = (patch_size + 2 * pad - (ksize - 1) - 1) // stride + 1\n if out_size != num_spatial_bins:\n raise ValueError(f'Patch size {patch_size} is incompatible with requested number of spatial bins {num_spatial_bins} for SIFT descriptor. Usually it happens when patch size is too small for num_spatial_bins specified')\n return (ksize, stride, pad)", + "docstring": "Return a tuple with SIFT parameters. Args: patch_size: the given patch size. num_spatial_bins: the ggiven number of spatial bins. 
Returns: ksize, stride, pad.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\siftdesc.py", + "ast_data": "FunctionDef name:get_sift_bin_ksize_stride_pad arg:patch_size arg:num_spatial_bins arguments arg arg Call If Compare Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "OpSupports", + "source_code": "@compatibility(is_backward_compatible=False)\nclass OpSupports:\n\n @classmethod\n def decline_if_input_dtype(cls, dtype: torch.dtype) -> OperatorSupportBase:\n\n def _decline_if_input_dtype(submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node) -> bool:\n for arg in node.all_input_nodes:\n arg_dtype = _get_arg_dtype(arg)\n if arg_dtype == dtype:\n return False\n return True\n return create_op_support(_decline_if_input_dtype)\n\n @classmethod\n def decline_if_node_in_names(cls, disallow_set: set[str]) -> OperatorSupportBase:\n\n def _decline_if_node_in_names(submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node) -> bool:\n return node.name not in disallow_set\n return create_op_support(_decline_if_node_in_names)", + "docstring": "A set of atomic instances that can be combined together to form more complex operator support logic.", + "type": "class", + "file_path": "pytorch\\torch\\fx\\passes\\operator_support.py", + "ast_data": "ClassDef name:OpSupports FunctionDef name:decline_if_input_dtype arg:cls arg:dtype arguments arg arg FunctionDef name:_decline_if_input_dtype arg:submodules arg:node arguments arg arg For Assign Call If Compare Return return:yes Return return:yes Return return:yes Call FunctionDef name:decline_if_node_in_names arg:cls arg:disallow_set arguments arg arg FunctionDef name:_decline_if_node_in_names arg:submodules arg:node arguments arg arg Return return:yes Compare Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.interpd\ndef __init__(self, patch, ox, oy, *, shade=0.7, **kwargs):\n super().__init__()\n self.patch = patch\n self._ox, self._oy = (ox, oy)\n self._shadow_transform = transforms.Affine2D()\n self.update_from(self.patch)\n if not 0 <= shade <= 1:\n raise ValueError('shade must be between 0 and 1.')\n color = (1 - shade) * np.asarray(colors.to_rgb(self.patch.get_facecolor()))\n self.update({'facecolor': color, 'edgecolor': color, 'alpha': 0.5, 'zorder': np.nextafter(self.patch.zorder, -np.inf), **kwargs})", + "docstring": "Create a shadow of the given *patch*. By default, the shadow will have the same face color as the *patch*, but darkened. The darkness can be controlled by *shade*. Parameters ---------- patch : The patch to create the shadow for. ox, oy : float The shift of the shadow in data coordinates, scaled by a factor of dpi/72. shade : float, default: 0.7 How the darkness of the shadow relates to the original color. If 1, the shadow is black, if 0, the shadow has the same color as the *patch*. .. versionadded:: 3.8 **kwargs Properties of the shadow patch. Supported keys are: %(Patch:kwdoc)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:patch arg:ox arg:oy arguments arg arg arg arg arg arg Call Call Assign Assign Assign Call Call If Compare Raise Call Assign Call Call Call Call Call" + }, + { + "library": "numpy", + "name": "_ptp", + "source_code": "def _ptp(x):\n return _unsigned_subtract(x.max(), x.min())", + "docstring": "Peak-to-peak value of x. 
This implementation avoids the problem of signed integer arrays having a peak-to-peak value that cannot be represented with the array's data type. This function returns an unsigned value for signed integer arrays.", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_histograms_impl.py", + "ast_data": "FunctionDef name:_ptp arg:x arguments arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_copy_trackable_to_cpu", + "source_code": "def _copy_trackable_to_cpu(self, object_map):\n if self in object_map:\n for v in self._vars:\n v._copy_trackable_to_cpu(object_map)\n else:\n copied_vars = []\n for v in self._vars:\n v._copy_trackable_to_cpu(object_map)\n copied_vars.append(object_map[v])\n new_var = TPUReplicatedVariable(copied_vars, name=self.name)\n object_map[self] = new_var", + "docstring": "For implementing .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_replicated_variable.py", + "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg If Compare For Call Assign For Call Call Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "_StridedSliceGrad", + "source_code": "@ops.RegisterGradient('StridedSlice')\ndef _StridedSliceGrad(op: ops.Operation, grad):\n begin = op.inputs[1]\n end = op.inputs[2]\n strides = op.inputs[3]\n x = array_ops.shape(op.inputs[0], out_type=begin.dtype)\n x_static = tensor_util.constant_value(x)\n x = x_static if x_static is not None else x\n begin_static = tensor_util.constant_value(begin)\n begin = begin_static if begin_static is not None else begin\n end_static = tensor_util.constant_value(end)\n end = end_static if end_static is not None else end\n strides_static = tensor_util.constant_value(strides)\n strides = strides_static if strides_static is not None else strides\n return (array_ops.strided_slice_grad(x, begin, end, strides, grad, begin_mask=op.get_attr('begin_mask'), end_mask=op.get_attr('end_mask'), ellipsis_mask=op.get_attr('ellipsis_mask'), new_axis_mask=op.get_attr('new_axis_mask'), shrink_axis_mask=op.get_attr('shrink_axis_mask')), None, None, None)", + "docstring": "Gradient for StridedSlice op.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py", + "ast_data": "FunctionDef name:_StridedSliceGrad arg:op arg:grad arguments arg arg Assign Assign Assign Assign Call Assign Call Assign Compare Assign Call Assign Compare Assign Call Assign Compare Assign Call Assign Compare Return return:yes Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "convert_nested_bidirectional", + "source_code": "def convert_nested_bidirectional(weights):\n num_weights_per_layer = len(weights) // 2\n forward_weights = preprocess_weights_for_loading(layer.forward_layer, weights[:num_weights_per_layer], original_keras_version, original_backend)\n backward_weights = preprocess_weights_for_loading(layer.backward_layer, weights[num_weights_per_layer:], original_keras_version, original_backend)\n return forward_weights + backward_weights", + "docstring": "Converts layers nested in wrapper. This function uses for converting layers. Args: weights: List of weights values (Numpy arrays). 
Returns: A list of weights values (Numpy arrays).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py", + "ast_data": "FunctionDef name:convert_nested_bidirectional arg:weights arguments arg Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "RubberbandBase", + "source_code": "class RubberbandBase(ToolBase):\n\n def trigger(self, sender, event, data=None):\n if not self.figure.canvas.widgetlock.available(sender):\n return\n if data is not None:\n self.draw_rubberband(*data)\n else:\n self.remove_rubberband()\n\n def draw_rubberband(self, *data):\n raise NotImplementedError\n\n def remove_rubberband(self):\n pass", + "docstring": "Draw and remove a rubberband.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "ClassDef name:RubberbandBase FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg If Call Return return:no If Compare Call Call FunctionDef name:draw_rubberband arg:self arguments arg arg Raise FunctionDef name:remove_rubberband arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "search", + "source_code": "def search(self, word):\n node = self.root\n for char in word:\n if char in node.children:\n node = node.children[char]\n else:\n return False\n return '' in node.children", + "docstring": "Search whether word is present in the Trie. Returns True if yes, else return False", + "type": "method", + "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py", + "ast_data": "FunctionDef name:search arg:self arg:word arguments arg arg Assign For If Compare Assign Return return:yes Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "convert_aten_tensor", + "source_code": "def convert_aten_tensor(self, node: torch._C.Node):\n args, kwargs = self.get_args_kwargs(node, torch.ops.aten.tensor.default._schema)\n for k in kwargs:\n if k == 'requires_grad':\n kwargs[k] = bool(kwargs[k])\n to_tensor = torch.tensor if all((isinstance(a, int) for a in args)) else torch._refs.tensor\n\n def target(*args, **kwargs):\n if 'dtype' in kwargs and kwargs['dtype'] is not None:\n kwargs['dtype'] = _TORCH_ENUM_TO_DTYPE[kwargs['dtype']]\n return to_tensor(*args, **kwargs)\n output_name = node.output().debugName()\n fx_node = self.fx_graph.call_function(target, args, kwargs)\n self.name_to_node[output_name] = fx_node", + "docstring": "aten::tensor creates a constant tensor ad-hoc --> GetAttr", + "type": "method", + "file_path": "pytorch\\torch\\_export\\converter.py", + "ast_data": "FunctionDef name:convert_aten_tensor arg:self arg:node arguments arg arg Assign Call For If Compare Assign Call Assign Call Call FunctionDef name:target arguments arg arg If BoolOp Compare Compare Assign Return return:yes Call Assign Call Call Assign Call Assign" + }, + { + "library": "pandas", + "name": "_validate_names", + "source_code": "def _validate_names(self) -> None:\n raise AbstractMethodError(self)", + "docstring": "Validate names. This method will check if names is a list-like and aligns with length of parse nodes. 
Raises ------ ValueError * If value is not a list and less then length of nodes.", + "type": "method", + "file_path": "pandas\\pandas\\io\\xml.py", + "ast_data": "FunctionDef name:_validate_names arg:self arguments arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_unblockify", + "source_code": "def _unblockify(self, x):\n if x.shape.is_fully_defined():\n x_shape = x.shape.as_list()\n x_leading_shape = x_shape[:-self.block_depth]\n x_block_shape = x_shape[-self.block_depth:]\n flat_shape = x_leading_shape + [np.prod(x_block_shape)]\n else:\n x_shape = array_ops.shape(x)\n x_leading_shape = x_shape[:-self.block_depth]\n x_block_shape = x_shape[-self.block_depth:]\n flat_shape = array_ops.concat((x_leading_shape, [math_ops.reduce_prod(x_block_shape)]), 0)\n return array_ops.reshape(x, flat_shape)", + "docstring": "Flatten the trailing block dimensions.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_circulant.py", + "ast_data": "FunctionDef name:_unblockify arg:self arg:x arguments arg arg If Call Assign Call Assign Assign Assign Call Assign Call Assign Assign Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "tolist", + "source_code": "def tolist(self):\n _warn_typed_storage_removal()\n return list(self)", + "docstring": "Return a list containing the elements of this storage.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:tolist arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "get_optical_pose_base", + "source_code": "def get_optical_pose_base(pinholes: Tensor) -> Tensor:\n if not (len(pinholes.shape) == 2 and pinholes.shape[1] == 12):\n raise AssertionError(pinholes.shape)\n raise NotImplementedError", + "docstring": "Compute extrinsic transformation matrices for pinholes. Args: pinholes: tensor of form [fx fy cx cy h w rx ry rz tx ty tz] of size (N, 12). Returns: tensor of extrinsic transformation matrices of size (N, 4, 4).", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", + "ast_data": "FunctionDef name:get_optical_pose_base arg:pinholes arguments arg If BoolOp Compare Call Compare Raise Call Raise" + }, + { + "library": "tensorflow", + "name": "sparse_segment_sqrt_n_v2", + "source_code": "@tf_export('sparse.segment_sqrt_n', v1=[])\ndef sparse_segment_sqrt_n_v2(data, indices, segment_ids, num_segments=None, name=None, sparse_gradient=False):\n return sparse_segment_sqrt_n(data, indices, segment_ids, name=name, num_segments=num_segments, sparse_gradient=sparse_gradient)", + "docstring": "Computes the sum along sparse segments of a tensor divided by the sqrt(N). Read [the section on segmentation]( for an explanation of segments. Like , but instead of dividing by the size of the segment, , divide by instead. Args: data: A with data that will be assembled in the output. indices: A 1-D with indices into . Has same rank as . segment_ids: A 1-D with indices into the output . Values should be sorted and can be repeated. num_segments: An optional int32 scalar. Indicates the size of the output . name: A name for the operation (optional). sparse_gradient: An optional . Defaults to . If , the gradient of this function will be sparse () instead of dense (). The sparse gradient will contain one non-zero row for each unique index in . 
Returns: A of the shape as data, except for dimension 0 which has size , the number of segments specified via or inferred for the last element in .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:sparse_segment_sqrt_n_v2 arg:data arg:indices arg:segment_ids arg:num_segments arg:name arg:sparse_gradient arguments arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "mark_failed", + "source_code": "def mark_failed(self, e):\n with self._queue_lock:\n if self._inflight_closure_count < 1:\n raise AssertionError('There is no inflight closures to mark_failed.')\n if self._error is None:\n self._error = e\n self.inflight_closure_count -= 1\n if self._inflight_closure_count == 0:\n self._no_inflight_closure_condition.notify_all()\n self._stop_waiting_condition.notify_all()", + "docstring": "Sets error and unblocks any wait() call.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", + "ast_data": "FunctionDef name:mark_failed arg:self arg:e arguments arg arg With If Compare Raise Call If Compare Assign If Compare Call Call" + }, + { + "library": "tensorflow", + "name": "ones_like", + "source_code": "@dispatch.dispatch_for_types(array_ops.ones_like, StructuredTensor)\ndef ones_like(tensor, dtype=None, name=None, optimize=True):\n del optimize\n return ones_like_v2(tensor, dtype=dtype, name=name)", + "docstring": "Implementation of zeros_like for StructuredTensor for TF v1.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", + "ast_data": "FunctionDef name:ones_like arg:tensor arg:dtype arg:name arg:optimize arguments arg arg arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "read", + "source_code": "def read(self, wkb):\n return GEOSGeometry(super().read(wkb))", + "docstring": "Return a GEOSGeometry for the given WKB buffer.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\io.py", + "ast_data": "FunctionDef name:read arg:self arg:wkb arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "BaseUserCreationForm", + "source_code": "class BaseUserCreationForm(SetPasswordMixin, forms.ModelForm):\n password1, password2 = SetPasswordMixin.create_password_fields()\n\n class Meta:\n model = User\n fields = ('username',)\n field_classes = {'username': UsernameField}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self._meta.model.USERNAME_FIELD in self.fields:\n self.fields[self._meta.model.USERNAME_FIELD].widget.attrs['autofocus'] = True\n\n def clean(self):\n self.validate_passwords()\n return super().clean()\n\n def _post_clean(self):\n super()._post_clean()\n self.validate_password_for_user(self.instance)\n\n def save(self, commit=True):\n user = super().save(commit=False)\n user = self.set_password_and_save(user, commit=commit)\n if commit and hasattr(self, 'save_m2m'):\n self.save_m2m()\n return user", + "docstring": "A form that creates a user, with no privileges, from the given username and password. This is the documented base class for customizing the user creation form. 
It should be kept mostly unchanged to ensure consistency and compatibility.", + "type": "class", + "file_path": "django\\django\\contrib\\auth\\forms.py", + "ast_data": "ClassDef name:BaseUserCreationForm Assign Call ClassDef name:Meta Assign Assign Assign FunctionDef name:__init__ arg:self arguments arg arg arg Call Call If Compare Assign FunctionDef name:clean arg:self arguments arg Call Return return:yes Call Call FunctionDef name:_post_clean arg:self arguments arg Call Call Call FunctionDef name:save arg:self arg:commit arguments arg arg Assign Call Call Assign Call If BoolOp Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "range_end", + "source_code": "def range_end(range_id) -> None:\n _nvtx.rangeEnd(range_id)", + "docstring": "Mark the end of a range for a given range_id. Args: range_id (int): an unique handle for the start range.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\nvtx.py", + "ast_data": "FunctionDef name:range_end arg:range_id arguments arg Call" + }, + { + "library": "pandas", + "name": "extend_base_template", + "source_code": "def extend_base_template(content: str, base_template: str) -> str:\n result = '{% extends \"' + base_template + '\" %}'\n result += '{% block body %}'\n result += content\n result += '{% endblock %}'\n return result", + "docstring": "Wrap document to extend the base template, before it is rendered with Jinja2.", + "type": "function", + "file_path": "pandas\\web\\pandas_web.py", + "ast_data": "FunctionDef name:extend_base_template arg:content arg:base_template arguments arg arg Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_row_partition_type_tensor_pairs", + "source_code": "def _get_row_partition_type_tensor_pairs(rt_input):\n partitions = rt_input._nested_row_partitions\n tail = [_get_row_partition_type_tensor_pairs_tail(x) for x in partitions[1:]]\n if partitions[0]._value_rowids is not None:\n return [('FIRST_DIM_SIZE', partitions[0].nrows()), ('VALUE_ROWIDS', partitions[0].value_rowids())] + tail\n else:\n return [('ROW_SPLITS', partitions[0].row_splits())] + tail", + "docstring": "Gets a list of the row partitions for rt_input. If value_rowids are defined, then they are used. Otherwise, row_splits are used. If the outermost level has value_rowids defind, then nrows is also added. Args: rt_input: a ragged tensor. Returns: A list of (row_partition_type, row_partition_tensor) pairs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", + "ast_data": "FunctionDef name:_get_row_partition_type_tensor_pairs arg:rt_input arguments arg Assign Assign Call If Compare Return return:yes Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "name", + "source_code": "@property\ndef name(self) -> str:\n raise AbstractMethodError(self)", + "docstring": "A string identifying the data type. Will be used for display in, e.g. 
``", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\base.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Raise Call" + }, + { + "library": "django", + "name": "OracleParam", + "source_code": "class OracleParam:\n\n def __init__(self, param, cursor, strings_only=False):\n if settings.USE_TZ and (isinstance(param, datetime.datetime) and (not isinstance(param, Oracle_datetime))):\n param = Oracle_datetime.from_datetime(param)\n string_size = 0\n has_boolean_data_type = cursor.database.features.supports_boolean_expr_in_select_clause\n if not has_boolean_data_type:\n if param is True:\n param = 1\n elif param is False:\n param = 0\n if hasattr(param, 'bind_parameter'):\n self.force_bytes = param.bind_parameter(cursor)\n elif isinstance(param, (Database.Binary, datetime.timedelta)):\n self.force_bytes = param\n else:\n self.force_bytes = force_str(param, cursor.charset, strings_only)\n if isinstance(self.force_bytes, str):\n string_size = len(force_bytes(param, cursor.charset, strings_only))\n if hasattr(param, 'input_size'):\n self.input_size = param.input_size\n elif string_size > 4000:\n self.input_size = Database.DB_TYPE_CLOB\n elif isinstance(param, datetime.datetime):\n self.input_size = Database.DB_TYPE_TIMESTAMP\n elif has_boolean_data_type and isinstance(param, bool):\n self.input_size = Database.DB_TYPE_BOOLEAN\n else:\n self.input_size = None", + "docstring": "Wrapper object for formatting parameters for Oracle. If the string representation of the value is large enough (greater than 4000 characters) the input size needs to be set as CLOB. Alternatively, if the parameter has an attribute, then the value of the attribute will be used instead. Otherwise, no input size will be set for the parameter when executing the query.", + "type": "class", + "file_path": "django\\django\\db\\backends\\oracle\\base.py", + "ast_data": "ClassDef name:OracleParam FunctionDef name:__init__ arg:self arg:param arg:cursor arg:strings_only arguments arg arg arg arg If BoolOp BoolOp Call Call Assign Call Assign Assign If If Compare Assign If Compare Assign If Call Assign Call If Call Assign Assign Call If Call Assign Call Call If Call Assign If Compare Assign If Call Assign If BoolOp Call Assign Assign" + }, + { + "library": "pytorch", + "name": "_get_special_act_post_process", + "source_code": "def _get_special_act_post_process(module: torch.nn.Module) -> Optional[Callable]:\n return DEFAULT_MODULE_TO_ACT_POST_PROCESS.get(type_before_parametrizations(module), None)", + "docstring": "Get the special activation post process for , this has higher priority than the activation post process in e.g. 
input: torch.nn.Sigmoid output: default_affine_fixed_qparam_fake_quant", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py", + "ast_data": "FunctionDef name:_get_special_act_post_process arg:module arguments arg Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "_build_system", + "source_code": "def _build_system(y, d, smoothing, kernel, epsilon, powers):\n p = d.shape[0]\n s = d.shape[1]\n r = powers.shape[0]\n kernel_func = NAME_TO_FUNC[kernel]\n mins = np.min(y, axis=0)\n maxs = np.max(y, axis=0)\n shift = (maxs + mins) / 2\n scale = (maxs - mins) / 2\n scale[scale == 0.0] = 1.0\n yeps = y * epsilon\n yhat = (y - shift) / scale\n lhs = np.empty((p + r, p + r), dtype=float).T\n kernel_matrix(yeps, kernel_func, lhs[:p, :p])\n polynomial_matrix(yhat, powers, lhs[:p, p:])\n lhs[p:, :p] = lhs[:p, p:].T\n lhs[p:, p:] = 0.0\n for i in range(p):\n lhs[i, i] += smoothing[i]\n rhs = np.empty((s, p + r), dtype=float).T\n rhs[:p] = d\n rhs[p:] = 0.0\n return (lhs, rhs, shift, scale)", + "docstring": "Build the system used to solve for the RBF interpolant coefficients. Parameters ---------- y : (P, N) float ndarray Data point coordinates. d : (P, S) float ndarray Data values at . smoothing : (P,) float ndarray Smoothing parameter for each data point. kernel : str Name of the RBF. epsilon : float Shape parameter. powers : (R, N) int ndarray The exponents for each monomial in the polynomial. Returns ------- lhs : (P + R, P + R) float ndarray Left-hand side matrix. rhs : (P + R, S) float ndarray Right-hand side matrix. shift : (N,) float ndarray Domain shift used to create the polynomial matrix. scale : (N,) float ndarray Domain scaling used to create the polynomial matrix.", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_rbfinterp_pythran.py", + "ast_data": "FunctionDef name:_build_system arg:y arg:d arg:smoothing arg:kernel arg:epsilon arg:powers arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign Call Assign Call Assign Assign Assign Compare Assign Assign Assign Call Call Call Assign Assign For Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_value_formatter", + "source_code": "def _value_formatter(self, float_format: FloatFormatType | None=None, threshold: float | None=None) -> Callable:\n if float_format is None:\n float_format = self.float_format\n if float_format:\n\n def base_formatter(v):\n assert float_format is not None\n return float_format(value=v) if notna(v) else self.na_rep\n else:\n\n def base_formatter(v):\n return str(v) if notna(v) else self.na_rep\n if self.decimal != '.':\n\n def decimal_formatter(v):\n return base_formatter(v).replace('.', self.decimal, 1)\n else:\n decimal_formatter = base_formatter\n if threshold is None:\n return decimal_formatter\n\n def formatter(value):\n if notna(value):\n if abs(value) > threshold:\n return decimal_formatter(value)\n else:\n return decimal_formatter(0.0)\n else:\n return self.na_rep\n return formatter", + "docstring": "Returns a function to be applied on each value to format it", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\format.py", + "ast_data": "FunctionDef name:_value_formatter arg:self arg:float_format arg:threshold arguments arg arg arg If Compare Assign If FunctionDef name:base_formatter arg:v arguments arg Compare Return return:yes Call Call FunctionDef name:base_formatter arg:v arguments arg Return return:yes Call Call If Compare FunctionDef 
name:decimal_formatter arg:v arguments arg Return return:yes Call Call Assign If Compare Return return:yes FunctionDef name:formatter arg:value arguments arg If Call If Compare Call Return return:yes Call Return return:yes Call Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "hasnans", + "source_code": "@cache_readonly\ndef hasnans(self) -> bool:\n if self._can_hold_na:\n return bool(self._isnan.any())\n else:\n return False", + "docstring": "Return True if there are any NaNs. Enables various performance speedups. Returns ------- bool See Also -------- Index.isna : Detect missing values. Index.dropna : Return Index without NA/NaN values. Index.fillna : Fill NA/NaN values with the specified value. Examples -------- >>> s = pd.Series([1, 2, 3], index=[\"a\", \"b\", None]) >>> s a 1 b 2 None 3 dtype: int64 >>> s.index.hasnans True", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:hasnans arg:self arguments arg If Return return:yes Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_merge_commit_sha", + "source_code": "def get_merge_commit_sha(repo: GitRepo, pr: GitHubPR) -> Optional[str]:\n commit_sha = get_pr_commit_sha(repo, pr)\n return commit_sha if pr.is_closed() else None", + "docstring": "Return the merge commit SHA iff the PR has been merged. For simplicity, we will only cherry pick PRs that have been merged into main", + "type": "function", + "file_path": "pytorch\\.github\\scripts\\cherry_pick.py", + "ast_data": "FunctionDef name:get_merge_commit_sha arg:repo arg:pr arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "full_like", + "source_code": "@tf_export.tf_export('experimental.numpy.full_like', v1=[])\n@np_utils.np_doc_only('full_like')\ndef full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):\n if order != 'K':\n raise ValueError('Non-standard orders are not supported.')\n if not subok:\n raise ValueError('subok being False is not supported.')\n if shape:\n raise ValueError('Overriding the shape is not supported.')\n a = asarray(a)\n dtype = dtype or np_utils.result_type(a)\n fill_value = asarray(fill_value, dtype=dtype)\n return array_ops.broadcast_to(fill_value, array_ops.shape(a))", + "docstring": "order, subok and shape arguments mustn't be changed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py", + "ast_data": "FunctionDef name:full_like arg:a arg:fill_value arg:dtype arg:order arg:subok arg:shape arguments arg arg arg arg arg arg If Compare Raise Call If Raise Call If Raise Call Assign Call Assign BoolOp Call Assign Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "experimental_set_type", + "source_code": "def experimental_set_type(self, type_proto) -> None:\n with self.graph._c_graph.get() as c_graph:\n if type_proto.type_id not in (full_type_pb2.TFT_UNSET, full_type_pb2.TFT_PRODUCT):\n raise ValueError('error setting the type of ', self.name, ': expected TFT_UNSET or TFT_PRODUCT, got ', type_proto.type_id)\n with c_api_util.tf_buffer(type_proto.SerializeToString()) as serialized:\n pywrap_tf_session.SetFullType(c_graph, self._c_op, serialized)", + "docstring": "Sets the corresponding node's field. See the description of for more info. Args: type_proto: A FullTypeDef proto message. 
The root type_if of this object must be , even for ops which only have a singlre return value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:experimental_set_type arg:self arg:type_proto arguments arg arg With Call If Compare Raise Call With Call Call Call" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, optim_cls: type) -> None:\n self.optim_cls = optim_cls", + "docstring": "Initialize the OverlappedOptimizer. Overlappedoptimizer is a base class that child classes can implement to specify how different optimizers will register themselves with DDP.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\algorithms\\_optimizer_overlap\\optimizer_overlap.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:optim_cls arguments arg arg Assign" + }, + { + "library": "pytorch", + "name": "SamplerIterDataPipe", + "source_code": "class SamplerIterDataPipe(IterDataPipe[_T_co]):\n datapipe: IterDataPipe\n sampler: Sampler\n\n def __init__(self, datapipe: IterDataPipe, sampler: type[Sampler]=SequentialSampler, sampler_args: Optional[tuple]=None, sampler_kwargs: Optional[dict]=None) -> None:\n assert isinstance(datapipe, Sized), 'Sampler class requires input datapipe implemented `__len__`'\n super().__init__()\n self.datapipe = datapipe\n self.sampler_args = () if sampler_args is None else sampler_args\n self.sampler_kwargs = {} if sampler_kwargs is None else sampler_kwargs\n self.sampler = sampler(*self.sampler_args, data_source=self.datapipe, **self.sampler_kwargs)\n\n def __iter__(self) -> Iterator[_T_co]:\n return iter(self.sampler)\n\n def __len__(self) -> int:\n if isinstance(self.sampler, Sized):\n return len(self.sampler)\n raise TypeError(f\"{type(self).__name__} instance doesn't have valid length\")", + "docstring": "Generate sample elements using the provided `SequentialSamplerSequentialSampler` for IterDataPipe", + "type": "class", + "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\combinatorics.py", + "ast_data": "ClassDef name:SamplerIterDataPipe FunctionDef name:__init__ arg:self arg:datapipe arg:sampler arg:sampler_args arg:sampler_kwargs arguments arg arg arg arg arg Call Call Call Assign Assign Compare Assign Compare Assign Call FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg If Call Return return:yes Call Raise Call Call" + }, + { + "library": "pandas", + "name": "_get_indexer_unique_sides", + "source_code": "def _get_indexer_unique_sides(self, target: IntervalIndex) -> npt.NDArray[np.intp]:\n left_indexer = self.left.get_indexer(target.left)\n right_indexer = self.right.get_indexer(target.right)\n indexer = np.where(left_indexer == right_indexer, left_indexer, -1)\n return indexer", + "docstring": "_get_indexer specialized to the case where both of our sides are unique.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\interval.py", + "ast_data": "FunctionDef name:_get_indexer_unique_sides arg:self arg:target arguments arg arg Assign Call Assign Call Assign Call Compare Return return:yes" + }, + { + "library": "pytorch", + "name": "codegen_node", + "source_code": "def codegen_node(self, node: Union[scheduler.FusedSchedulerNode, scheduler.SchedulerNode]):\n nodes: list[scheduler.SchedulerNode] = node.get_nodes()\n _, (numel, rnumel) = max(nodes, key=lambda x: int(x.is_reduction())).group\n node_schedule = self.generate_node_schedule(nodes, numel, 
rnumel)\n schedule_log.debug('Schedule:\\n %s', node_schedule)\n return self.codegen_node_schedule(SIMDKernelFeatures(node_schedule, numel, rnumel))", + "docstring": "Given a set of pre-fused nodes, generate a Triton kernel.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py", + "ast_data": "FunctionDef name:codegen_node arg:self arg:node arguments arg arg Call Assign Call arguments arg Call Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "FlameNotFoundError", + "source_code": "class FlameNotFoundError(NotFoundError):\n pass", + "docstring": "FLAME ( libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [flame]).", + "type": "class", + "file_path": "numpy\\numpy\\distutils\\system_info.py", + "ast_data": "ClassDef name:FlameNotFoundError" + }, + { + "library": "tensorflow", + "name": "DropoutWrapper", + "source_code": "@deprecated(None, 'Please use tf.keras.layers.RNN instead.')\n@tf_export('nn.RNNCellDropoutWrapper', v1=[])\nclass DropoutWrapper(rnn_cell_wrapper_impl.DropoutWrapperBase, _RNNCellWrapperV2):\n\n def __init__(self, *args, **kwargs):\n super(DropoutWrapper, self).__init__(*args, **kwargs)\n if isinstance(self.cell, recurrent.LSTMCell):\n raise ValueError('keras LSTM cell does not work with DropoutWrapper. Please use LSTMCell(dropout=x, recurrent_dropout=y) instead.')\n __init__.__doc__ = rnn_cell_wrapper_impl.DropoutWrapperBase.__init__.__doc__", + "docstring": "Operator adding dropout to inputs and outputs of the given cell.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\rnn_cell_wrapper_v2.py", + "ast_data": "ClassDef name:DropoutWrapper FunctionDef name:__init__ arg:self arguments arg arg arg Call Call If Call Raise Call Assign Call Call" + }, + { + "library": "django", + "name": "ASGIStaticFilesHandler", + "source_code": "class ASGIStaticFilesHandler(StaticFilesHandlerMixin, ASGIHandler):\n\n def __init__(self, application):\n self.application = application\n self.base_url = urlparse(self.get_base_url())\n\n async def __call__(self, scope, receive, send):\n if scope['type'] == 'http' and self._should_handle(scope['path']):\n return await super().__call__(scope, receive, send)\n return await self.application(scope, receive, send)\n\n async def get_response_async(self, request):\n response = await super().get_response_async(request)\n response._resource_closers.append(request.close)\n if response.streaming and (not response.is_async):\n _iterator = response.streaming_content\n\n async def awrapper():\n for part in await sync_to_async(list)(_iterator):\n yield part\n response.streaming_content = awrapper()\n return response", + "docstring": "ASGI application which wraps another and intercepts requests for static files, passing them off to Django's static file serving.", + "type": "class", + "file_path": "django\\django\\contrib\\staticfiles\\handlers.py", + "ast_data": "ClassDef name:ASGIStaticFilesHandler FunctionDef name:__init__ arg:self arg:application arguments arg arg Assign Assign Call Call AsyncFunctionDef name:__call__ arg:self arg:scope arg:receive arg:send arguments arg arg arg arg If BoolOp Compare Call Return return:yes Call Call Return return:yes Call AsyncFunctionDef name:get_response_async arg:self arg:request arguments arg arg Assign Call Call Call If BoolOp Assign AsyncFunctionDef name:awrapper arguments For Call Call Assign Call Return return:yes" + }, + { + "library": 
"scikit-learn", + "name": "_print_iteration_stats", + "source_code": "def _print_iteration_stats(self, iteration_start_time):\n log_msg = ''\n predictors_of_ith_iteration = [predictors_list for predictors_list in self._predictors[-1] if predictors_list]\n n_trees = len(predictors_of_ith_iteration)\n max_depth = max((predictor.get_max_depth() for predictor in predictors_of_ith_iteration))\n n_leaves = sum((predictor.get_n_leaf_nodes() for predictor in predictors_of_ith_iteration))\n if n_trees == 1:\n log_msg += '{} tree, {} leaves, '.format(n_trees, n_leaves)\n else:\n log_msg += '{} trees, {} leaves '.format(n_trees, n_leaves)\n log_msg += '({} on avg), '.format(int(n_leaves / n_trees))\n log_msg += 'max depth = {}, '.format(max_depth)\n if self.do_early_stopping_:\n if self.scoring == 'loss':\n factor = -1\n name = 'loss'\n else:\n factor = 1\n name = 'score'\n log_msg += 'train {}: {:.5f}, '.format(name, factor * self.train_score_[-1])\n if self._use_validation_data:\n log_msg += 'val {}: {:.5f}, '.format(name, factor * self.validation_score_[-1])\n iteration_time = time() - iteration_start_time\n log_msg += 'in {:0.3f}s'.format(iteration_time)\n print(log_msg)", + "docstring": "Print info about the current fitting iteration.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", + "ast_data": "FunctionDef name:_print_iteration_stats arg:self arg:iteration_start_time arguments arg arg Assign Assign Assign Call Assign Call Call Assign Call Call If Compare Call Call Call Call Call If If Compare Assign Assign Assign Assign Call If Call Assign Call Call Call" + }, + { + "library": "pandas", + "name": "infer_filename", + "source_code": "def infer_filename(self) -> str | None:\n if isinstance(self.buffer.filename, (os.PathLike, str)):\n filename = Path(self.buffer.filename)\n if filename.suffix == '.zip':\n return filename.with_suffix('').name\n return filename.name\n return None", + "docstring": "If an explicit archive_name is not given, we still want the file inside the zip file not to be named something.zip, because that causes confusion (GH39465).", + "type": "method", + "file_path": "pandas\\pandas\\io\\common.py", + "ast_data": "FunctionDef name:infer_filename arg:self arguments arg If Call Assign Call If Compare Return return:yes Call Return return:yes Return return:no" + }, + { + "library": "tensorflow", + "name": "_dynamic_range_quantize", + "source_code": "def _dynamic_range_quantize(src_saved_model_path: str, dst_saved_model_path: str, quantization_options: _QuantizationOptions) -> autotrackable.AutoTrackable:\n mode_str = 'dynamic-range quantization'\n if _is_qat_saved_model(src_saved_model_path):\n raise ValueError('The models trained with quantization-aware training (QAT) is not supported for %s.' 
% mode_str)\n logging.info('Running post-training %s on model: %s', mode_str, src_saved_model_path)\n logging.info('QuantizationOptions: \\n%s', quantization_options)\n signature_def_map = save_model.get_signatures_from_saved_model(src_saved_model_path, quantization_options.signature_keys, quantization_options.tags)\n pywrap_quantize_model.quantize_ptq_dynamic_range(src_saved_model_path, dst_saved_model_path, quantization_options_serialized=quantization_options.SerializeToString(), signature_keys=list(quantization_options.signature_keys), signature_def_map_serialized=_serialize_signature_def_map(signature_def_map), py_function_library=py_function_lib.PyFunctionLibrary())\n return saved_model_load.load(dst_saved_model_path)", + "docstring": "Quantizes the given SavedModel via post-training dynamic range quantization. Args: src_saved_model_path: Path to the saved model. dst_saved_model_path: The path to save the output SavedModel. The directory will be overwritten if not empty. quantization_options: QuantizationOptions proto describing quantization related config. Returns: A SavedModel object with TF quantization applied. Raises: ValueError: when the model is QAT model.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\quantize_model.py", + "ast_data": "FunctionDef name:_dynamic_range_quantize arg:src_saved_model_path arg:dst_saved_model_path arg:quantization_options arguments arg arg arg Assign If Call Raise Call Call Call Assign Call Call Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "AutoGraphError", + "source_code": "class AutoGraphError(errors.PyCTError):\n pass", + "docstring": "Base class for all AutoGraph exceptions.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py", + "ast_data": "ClassDef name:AutoGraphError" + }, + { + "library": "tensorflow", + "name": "get_image", + "source_code": "def get_image(width, height, want_grayscale, filepath):\n with ops.Graph().as_default():\n with session.Session():\n file_data = io_ops.read_file(filepath)\n channels = 1 if want_grayscale else 3\n image_tensor = image_ops.decode_image(file_data, channels=channels).eval()\n resized_tensor = image_ops.resize_images_v2(image_tensor, (height, width)).eval()\n return resized_tensor", + "docstring": "Returns an image loaded into an np.ndarray with dims [height, width, (3 or 1)]. Args: width: Width to rescale the image to. height: Height to rescale the image to. want_grayscale: Whether the result should be converted to grayscale. filepath: Path of the image file.. Returns: np.ndarray of shape (height, width, channels) where channels is 1 if want_grayscale is true, otherwise 3.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\convert_image_to_csv.py", + "ast_data": "FunctionDef name:get_image arg:width arg:height arg:want_grayscale arg:filepath arguments arg arg arg arg With Call Call With Call Assign Call Assign Assign Call Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "restride_A_for_fused_matmul_reduce_scatter", + "source_code": "def restride_A_for_fused_matmul_reduce_scatter(t: torch.Tensor, scatter_dim: int) -> torch.Tensor:\n perm = list(range(len(t.shape)))\n perm.insert(0, perm.pop(scatter_dim))\n return make_contiguous_for_perm(t, perm)", + "docstring": "Restride the arg of for optimal perf. 
See the doc for for detail.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py", + "ast_data": "FunctionDef name:restride_A_for_fused_matmul_reduce_scatter arg:t arg:scatter_dim arguments arg arg Assign Call Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "prune_all_non_t_attributes", + "source_code": "def prune_all_non_t_attributes(node: node_def_pb2.NodeDef) -> None:\n if 'T' in node.attr:\n t_value = node.attr['T']\n node.ClearField('attr')\n node.attr['T'].CopyFrom(t_value)\n else:\n node.ClearField('attr')", + "docstring": "Prunes all attributes that are not .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py", + "ast_data": "FunctionDef name:prune_all_non_t_attributes arg:node arguments arg If Compare Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "create_python_return_type_bindings_header", + "source_code": "def create_python_return_type_bindings_header(fm: FileManager, pairs: Sequence[PythonSignatureNativeFunctionPair], pred: Callable[[NativeFunction], bool], filename: str) -> None:\n py_return_types_declarations: list[str] = []\n grouped = group_filter_overloads(pairs, pred)\n for name in sorted(grouped.keys(), key=str):\n overloads = grouped[name]\n declarations = generate_return_type_declarations(overloads)\n py_return_types_declarations.append('' if not declarations else '\\n'.join(declarations))\n fm.write_with_template(filename, filename, lambda: {'generated_comment': '@' + f'generated from {fm.template_dir_for_comments()}/{filename}', 'py_return_types_declarations': py_return_types_declarations})", + "docstring": "Generate function to initialize and return named tuple for native functions which returns named tuple and relevant entry for the map in .", + "type": "function", + "file_path": "pytorch\\tools\\autograd\\gen_python_functions.py", + "ast_data": "FunctionDef name:create_python_return_type_bindings_header arg:fm arg:pairs arg:pred arg:filename arguments arg arg arg arg Assign Call For Call Call Assign Assign Call Call Call Call arguments Call" + }, + { + "library": "django", + "name": "do_get_language_info", + "source_code": "@register.tag('get_language_info')\ndef do_get_language_info(parser, token):\n args = token.split_contents()\n if len(args) != 5 or args[1] != 'for' or args[3] != 'as':\n raise TemplateSyntaxError(\"'%s' requires 'for string as variable' (got %r)\" % (args[0], args[1:]))\n return GetLanguageInfoNode(parser.compile_filter(args[2]), args[4])", + "docstring": "Store the language information dictionary for the given language code in a context variable. 
Usage:: {% get_language_info for LANGUAGE_CODE as l %} {{ l.code }} {{ l.name }} {{ l.name_translated }} {{ l.name_local }} {{ l.bidi|yesno:\"bi-directional,uni-directional\" }}", + "type": "function", + "file_path": "django\\django\\templatetags\\i18n.py", + "ast_data": "FunctionDef name:do_get_language_info arg:parser arg:token arguments arg arg Assign Call If BoolOp Compare Call Compare Compare Raise Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "assign_memory_planning_info_for_scheduler_nodes", + "source_code": "def assign_memory_planning_info_for_scheduler_nodes(nodes: list[BaseSchedulerNode], name_to_fused_node: dict[str, BaseSchedulerNode], name_to_buf: dict[str, SchedulerBuffer], name_to_freeable_input_buf: dict[str, FreeableInputBuffer]) -> None:\n from .scheduler import SchedulerBuffer\n for index, node in enumerate(nodes):\n size_alloc = sum((buffer.mpi_buffer.size_alloc for buffer in node.get_outputs()))\n pred_buffers = OrderedSet[Union[SchedulerBuffer, FreeableInputBuffer]]()\n for dep in node.read_writes.reads:\n if dep.name in name_to_buf and dep in node.unmet_dependencies:\n pred_buffers.add(name_to_buf[dep.name])\n elif dep.name in name_to_freeable_input_buf:\n pred_buffers.add(name_to_freeable_input_buf[dep.name])\n pred_nodes = OrderedSet((name_to_fused_node[pred_buffer.defining_op_name()] for pred_buffer in pred_buffers if isinstance(pred_buffer, SchedulerBuffer)))\n succ_nodes = OrderedSet((succ_node for buffer in node.get_outputs() for succ_node in buffer.mpi_buffer.succ_nodes))\n node.mpi_node = MemoryPlanningInfoForNode(index=index, size=size_alloc, pred_buffers=pred_buffers, pred_nodes=pred_nodes, succ_nodes=succ_nodes)", + "docstring": "Assign to each scheduler node its predecessor and successor nodes.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\memory.py", + "ast_data": "FunctionDef name:assign_memory_planning_info_for_scheduler_nodes arg:nodes arg:name_to_fused_node arg:name_to_buf arg:name_to_freeable_input_buf arguments arg arg arg arg For Call Assign Call Call Assign Call For If BoolOp Compare Compare Call If Compare Call Assign Call Call Call Assign Call Call Assign Call" + }, + { + "library": "django", + "name": "clone", + "source_code": "def clone(self):\n return GEOSGeometry(capi.geom_clone(self.ptr))", + "docstring": "Clone this Geometry.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", + "ast_data": "FunctionDef name:clone arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "get_fuser_method_new", + "source_code": "def get_fuser_method_new(op_pattern: Pattern, fuser_method_mapping: dict[Pattern, Union[nn.Sequential, Callable]]):\n op_patterns = _get_valid_patterns(op_pattern)\n fuser_method = None\n for op_pattern in op_patterns:\n fuser_method = fuser_method_mapping.get(op_pattern, None)\n if fuser_method is not None:\n break\n assert fuser_method is not None, f'did not find fuser method for: {op_pattern} '\n return fuser_method", + "docstring": "Get fuser method. 
This will be made default after we deprecate the get_fuser_method Would like to implement this first and have a separate PR for deprecation", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fuser_method_mappings.py", + "ast_data": "FunctionDef name:get_fuser_method_new arg:op_pattern arg:fuser_method_mapping arguments arg arg Assign Call Assign For Assign Call If Compare Compare Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_fontsize", + "source_code": "def set_fontsize(self, fontsize):\n self._fontproperties.set_size(fontsize)\n self.stale = True", + "docstring": "Set the font size. Parameters ---------- fontsize : float or {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'} If a float, the fontsize in points. The string values denote sizes relative to the default font size. See Also -------- .font_manager.FontProperties.set_size", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:set_fontsize arg:self arg:fontsize arguments arg arg Call Assign" + }, + { + "library": "pandas", + "name": "get_array_op", + "source_code": "def get_array_op(op):\n if isinstance(op, partial):\n return op\n op_name = op.__name__.strip('_').lstrip('r')\n if op_name == 'arith_op':\n return op\n if op_name in {'eq', 'ne', 'lt', 'le', 'gt', 'ge'}:\n return partial(comparison_op, op=op)\n elif op_name in {'and', 'or', 'xor', 'rand', 'ror', 'rxor'}:\n return partial(logical_op, op=op)\n elif op_name in {'add', 'sub', 'mul', 'truediv', 'floordiv', 'mod', 'divmod', 'pow'}:\n return partial(arithmetic_op, op=op)\n else:\n raise NotImplementedError(op_name)", + "docstring": "Return a binary array operation corresponding to the given operator op. Parameters ---------- op : function Binary operator from operator or roperator module. Returns ------- functools.partial", + "type": "function", + "file_path": "pandas\\pandas\\core\\ops\\array_ops.py", + "ast_data": "FunctionDef name:get_array_op arg:op arguments arg If Call Return return:yes Assign Call Call If Compare Return return:yes If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call" + }, + { + "library": "scikit-learn", + "name": "_weighted_cluster_center", + "source_code": "def _weighted_cluster_center(self, X):\n n_clusters = len(set(self.labels_) - {-1, -2})\n mask = np.empty((X.shape[0],), dtype=np.bool_)\n make_centroids = self.store_centers in ('centroid', 'both')\n make_medoids = self.store_centers in ('medoid', 'both')\n if make_centroids:\n self.centroids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)\n if make_medoids:\n self.medoids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)\n for idx in range(n_clusters):\n mask = self.labels_ == idx\n data = X[mask]\n strength = self.probabilities_[mask]\n if make_centroids:\n self.centroids_[idx] = np.average(data, weights=strength, axis=0)\n if make_medoids:\n dist_mat = pairwise_distances(data, metric=self.metric, **self._metric_params)\n dist_mat = dist_mat * strength\n medoid_index = np.argmin(dist_mat.sum(axis=1))\n self.medoids_[idx] = data[medoid_index]\n return", + "docstring": "Calculate and store the centroids/medoids of each cluster. This requires to be a raw feature array, not precomputed distances. Rather than return outputs directly, this helper method instead stores them in the attributes. The choice for which attributes are calculated and stored is mediated by the value of . 
Parameters ---------- X : ndarray of shape (n_samples, n_features) The feature array that the estimator was fit with.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cluster\\_hdbscan\\hdbscan.py", + "ast_data": "FunctionDef name:_weighted_cluster_center arg:self arg:X arguments arg arg Assign Call Call Assign Call Assign Compare Assign Compare If Assign Call If Assign Call For Call Assign Compare Assign Assign If Assign Call If Assign Call Assign Assign Call Call Assign Return return:no" + }, + { + "library": "authlib", + "name": "InvalidScopeError", + "source_code": "class InvalidScopeError(OAuth2Error):\n error = 'invalid_scope'\n description = 'The requested scope is invalid, unknown, or malformed.'", + "docstring": "The requested scope is invalid, unknown, malformed, or exceeds the scope granted by the resource owner.", + "type": "class", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\errors.py", + "ast_data": "ClassDef name:InvalidScopeError Assign Assign" + }, + { + "library": "pytorch", + "name": "save_state", + "source_code": "def save_state(self, filename: str='fuzzer_state.pkl') -> None:\n with open(filename, 'wb') as f:\n pickle.dump({'results': self.results, 'detailed_results': self.detailed_results}, f)", + "docstring": "Save the current fuzzer state to a file", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\fuzzer.py", + "ast_data": "FunctionDef name:save_state arg:self arg:filename arguments arg arg With Call Call" + }, + { + "library": "tensorflow", + "name": "set_memory_growth", + "source_code": "def set_memory_growth(self, dev, enable):\n self._initialize_physical_devices()\n if dev not in self._physical_devices:\n raise ValueError('Unrecognized device: %s' % repr(dev))\n if dev in self._virtual_device_map:\n raise ValueError('Cannot set memory growth on device when virtual devices configured')\n if dev.device_type != 'GPU' and dev not in self._pluggable_devices:\n raise ValueError('Cannot set memory growth on non-GPU and non-Pluggable devices')\n if self._memory_growth_map.get(dev) == enable:\n return\n if self._context_handle is not None:\n raise RuntimeError('Physical devices cannot be modified after being initialized')\n self._memory_growth_map[dev] = enable", + "docstring": "Set if memory growth should be enabled for a PhysicalDevice.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:set_memory_growth arg:self arg:dev arg:enable arguments arg arg arg Call If Compare Raise Call Call If Compare Raise Call If BoolOp Compare Compare Raise Call If Compare Call Return return:no If Compare Raise Call Assign" + }, + { + "library": "sphinx", + "name": "SphinxParallelError", + "source_code": "class SphinxParallelError(SphinxError):\n category = 'Sphinx parallel build error'\n\n def __init__(self, message: str, traceback: str) -> None:\n self.message = message\n self.traceback = traceback\n\n def __str__(self) -> str:\n return self.message", + "docstring": "Sphinx parallel build error.", + "type": "class", + "file_path": "sphinx\\sphinx\\errors.py", + "ast_data": "ClassDef name:SphinxParallelError Assign FunctionDef name:__init__ arg:self arg:message arg:traceback arguments arg arg arg Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes" + }, + { + "library": "authlib", + "name": "JWSObject", + "source_code": "class JWSObject(dict):\n\n def __init__(self, header, payload, type='compact'):\n super().__init__(header=header, payload=payload)\n 
self.header = header\n self.payload = payload\n self.type = type\n\n @property\n def headers(self):\n if self.type == 'json':\n return self['header']", + "docstring": "A dict instance to represent a JWS object.", + "type": "class", + "file_path": "authlib\\authlib\\jose\\rfc7515\\models.py", + "ast_data": "ClassDef name:JWSObject FunctionDef name:__init__ arg:self arg:header arg:payload arg:type arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:headers arg:self arguments arg If Compare Return return:yes" + }, + { + "library": "scikit-learn", + "name": "n_components_", + "source_code": "@property\ndef n_components_(self):\n return self.dictionary.shape[0]", + "docstring": "Number of atoms.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py", + "ast_data": "FunctionDef name:n_components_ arg:self arguments arg Return return:yes" + }, + { + "library": "scrapy", + "name": "_genspider", + "source_code": "def _genspider(self, module: str, name: str, url: str, template_name: str, template_file: str | os.PathLike) -> None:\n tvars = self._generate_template_variables(module, name, url, template_name)\n if self.settings.get('NEWSPIDER_MODULE'):\n spiders_module = import_module(self.settings['NEWSPIDER_MODULE'])\n assert spiders_module.__file__\n spiders_dir = Path(spiders_module.__file__).parent.resolve()\n else:\n spiders_module = None\n spiders_dir = Path()\n spider_file = f'{spiders_dir / module}.py'\n shutil.copyfile(template_file, spider_file)\n render_templatefile(spider_file, **tvars)\n print(f'Created spider {name!r} using template {template_name!r} ', end='' if spiders_module else '\\n')\n if spiders_module:\n print(f'in module:\\n {spiders_module.__name__}.{module}')", + "docstring": "Generate the spider module, based on the given template", + "type": "method", + "file_path": "scrapy\\scrapy\\commands\\genspider.py", + "ast_data": "FunctionDef name:_genspider arg:self arg:module arg:name arg:url arg:template_name arg:template_file arguments arg arg arg arg arg arg Assign Call If Call Assign Call Assign Call Call Assign Assign Call Assign Call Call Call If Call" + }, + { + "library": "pytorch", + "name": "init", + "source_code": "def init(self, state: _FSDPState, root_module: nn.Module, process_group: dist.ProcessGroup) -> None:\n self.process_group = process_group\n self.rank = process_group.rank()\n self.world_size = process_group.size()\n for handle in traversal_utils._get_fsdp_handles(root_module):\n index = len(self.all_handles)\n self.all_handles.append(handle)\n handle._handle_index = index\n self.param_to_fqn = _get_param_to_fqns(root_module)", + "docstring": "Initializes the data structures needed for checking the forward order. 
This should be called after a root FSDP instance has been set during lazy initialization.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_exec_order_utils.py", + "ast_data": "FunctionDef name:init arg:self arg:state arg:root_module arg:process_group arguments arg arg arg arg Assign Assign Call Assign Call For Call Assign Call Call Assign Assign Call" + }, + { + "library": "pytorch", + "name": "supercedes", + "source_code": "def supercedes(a, b):\n if isvar(b) and (not isvar(a)):\n return True\n s = unify(a, b)\n if s is False:\n return False\n s = {k: v for k, v in s.items() if not isvar(k) or not isvar(v)}\n if reify(a, s) == a:\n return True\n if reify(b, s) == b:\n return False", + "docstring": "``", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\match.py", + "ast_data": "FunctionDef name:supercedes arg:a arg:b arguments arg arg If BoolOp Call Call Return return:yes Assign Call If Compare Return return:yes Assign Call BoolOp Call Call If Compare Call Return return:yes If Compare Call Return return:yes" + }, + { + "library": "cherrypy", + "name": "__len__", + "source_code": "def __len__(self):\n return len([fname for fname in os.listdir(self.storage_path) if fname.startswith(self.SESSION_PREFIX) and (not fname.endswith(self.LOCK_SUFFIX))])", + "docstring": "Return the number of active sessions.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call Call BoolOp Call Call" + }, + { + "library": "pytorch", + "name": "add", + "source_code": "def add(self, args, kwargs=None):\n assert type(args) is tuple, f'Representative args {args} must be a tuple'\n assert kwargs is None or type(kwargs) is dict, f'Representative kwargs {kwargs} must be None or a dict'\n self._examples.append((args, kwargs))", + "docstring": "Additional input :func: and :func:.", + "type": "method", + "file_path": "pytorch\\torch\\export\\dynamic_shapes.py", + "ast_data": "FunctionDef name:add arg:self arg:args arg:kwargs arguments arg arg arg Compare Call BoolOp Compare Compare Call Call" + }, + { + "library": "cherrypy", + "name": "restart", + "source_code": "def restart(self):\n self.stop()\n self.start()", + "docstring": "Restart the HTTP server.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\servers.py", + "ast_data": "FunctionDef name:restart arg:self arguments arg Call Call" + }, + { + "library": "tensorflow", + "name": "_get_inferred_type", + "source_code": "def _get_inferred_type(self, node, default=None):\n types_ = anno.getanno(node, anno.Static.TYPES, None)\n if not types_:\n print('WARN: no Static.TYPES annotation. 
Fix the type inference pass: ')\n self.debug_print(node)\n return default\n if len(types_) == 1:\n type_, = types_\n else:\n type_ = types_\n if default is not None and type_ != default:\n print('WARN: type annotation {}({}) does not match {}({})'.format(type_, type(type_), default, type(default)))\n self.debug_print(node)\n return type_", + "docstring": "Return single type or a tuple of types if more than one type.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\tfr_gen.py", + "ast_data": "FunctionDef name:_get_inferred_type arg:self arg:node arg:default arguments arg arg arg Assign Call If Call Call Return return:yes If Compare Call Assign Assign If BoolOp Compare Compare Call Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "nodes_map", + "source_code": "def nodes_map(nodes: list[torch.fx.Node], node_call_back) -> list[torch.fx.Node]:\n for node in nodes:\n node_call_back(node)\n return nodes", + "docstring": "Sequentially visit the nodes list and invoke node_call_back on each element. Returns the nodes list after the node_call_back is invoked on each element.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\utils.py", + "ast_data": "FunctionDef name:nodes_map arg:nodes arg:node_call_back arguments arg arg For Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_detector_name", + "source_code": "def get_detector_name(self) -> str:\n return 'input_weight_equalization_detector'", + "docstring": "Returns the name of this detector", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", + "ast_data": "FunctionDef name:get_detector_name arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_update_docstring_with_api_list", + "source_code": "def _update_docstring_with_api_list(target, api_list):\n lines = []\n for func in api_list:\n name = tf_export_lib.get_canonical_name_for_symbol(func, add_prefix_to_v1_names=True)\n if name is not None:\n params = tf_inspect.signature(func).parameters.keys()\n lines.append(f' * `tf.{name}({', '.join(params)})`')\n lines.sort()\n target.__doc__ = target.__doc__.replace(' <>', '\\n'.join(lines))", + "docstring": "Replaces in target.__doc__ with the given list of APIs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", + "ast_data": "FunctionDef name:_update_docstring_with_api_list arg:target arg:api_list arguments arg arg Assign For Assign Call If Compare Assign Call Call Call Call Call Assign Call Call" + }, + { + "library": "scipy", + "name": "_lazyselect", + "source_code": "def _lazyselect(condlist, choicelist, arrays, default=0):\n arrays = np.broadcast_arrays(*arrays)\n tcode = np.mintypecode([a.dtype.char for a in arrays])\n out = np.full(np.shape(arrays[0]), fill_value=default, dtype=tcode)\n for func, cond in zip(choicelist, condlist):\n if np.all(cond is False):\n continue\n cond, _ = np.broadcast_arrays(cond, arrays[0])\n temp = tuple((np.extract(cond, arr) for arr in arrays))\n np.place(out, cond, func(*temp))\n return out", + "docstring": "Mimic . Notice, it assumes that all are of the same shape or can be broadcasted together. All functions in must accept array arguments in the order given in and must return an array of the same shape as broadcasted . 
Examples -------- >>> import numpy as np >>> x = np.arange(6) >>> np.select([x 3], [x**2, x**3], default=0) array([ 0, 1, 4, 0, 64, 125]) >>> _lazyselect([x 3], [lambda x: x**2, lambda x: x**3], (x,)) array([ 0., 1., 4., 0., 64., 125.]) >>> a = -np.ones_like(x) >>> _lazyselect([x 3], ... [lambda x, a: x**2, lambda x, a: a * x**3], ... (x, a), default=np.nan) array([ 0., 1., 4., nan, -64., -125.])", + "type": "function", + "file_path": "scipy\\scipy\\_lib\\_util.py", + "ast_data": "FunctionDef name:_lazyselect arg:condlist arg:choicelist arg:arrays arg:default arguments arg arg arg arg Assign Call Assign Call Assign Call Call For Call If Call Compare Assign Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "setup", + "source_code": "def setup(ax, title):\n ax.yaxis.set_major_locator(ticker.NullLocator())\n ax.spines[['left', 'right', 'top']].set_visible(False)\n ax.xaxis.set_ticks_position('bottom')\n ax.tick_params(which='major', width=1.0, length=5)\n ax.tick_params(which='minor', width=0.75, length=2.5)\n ax.set_xlim(0, 5)\n ax.set_ylim(0, 1)\n ax.text(0.0, 0.2, title, transform=ax.transAxes, fontsize=14, fontname='Monospace', color='tab:blue')", + "docstring": "Set up common parameters for the Axes in the example.", + "type": "function", + "file_path": "matplotlib\\galleries\\users_explain\\axes\\axes_ticks.py", + "ast_data": "FunctionDef name:setup arg:ax arg:title arguments arg arg Call Call Call Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "mean", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef mean(x, axis=None, keepdims=False):\n if x.dtype.base_dtype == dtypes_module.bool:\n x = math_ops.cast(x, floatx())\n return math_ops.reduce_mean(x, axis, keepdims)", + "docstring": "Mean of a tensor, alongside the specified axis. Args: x: A tensor or variable. axis: A list of integer. Axes to compute the mean. keepdims: A boolean, whether to keep the dimensions or not. If is , the rank of the tensor is reduced by 1 for each entry in . If is , the reduced dimensions are retained with length 1. Returns: A tensor with the mean of elements of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:mean arg:x arg:axis arg:keepdims arguments arg arg arg If Compare Assign Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "inverse", + "source_code": "def inverse(self, input: Tensor, params: Optional[List[ParamItem]]=None, extra_args: Optional[Dict[str, Any]]=None) -> Tensor:\n if params is None:\n if self._params is None:\n raise ValueError('No parameters available for inversing, please run a forward pass first or passing valid params into this function.')\n params = self._params\n input = self.inverse_inputs(input, params, extra_args=extra_args)\n return input", + "docstring": "Inverse transformation. 
Used to inverse a tensor according to the performed transformation by a forward pass, or with respect to provided parameters.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\base.py", + "ast_data": "FunctionDef name:inverse arg:self arg:input arg:params arg:extra_args arguments arg arg arg arg If Compare If Compare Raise Call Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_create_variables_and_slots", + "source_code": "def _create_variables_and_slots(self) -> Dict[Text, Dict[Text, tf_variables.Variable]]:\n variables = {}\n for table in self._table_config:\n variables[table.name] = self._create_variables(table, trainable=True)\n return variables", + "docstring": "Create variables for TPU embeddings. Note that this will always ensure that the variable is created under the TPUStrategy. Returns: A dict of dicts. The outer dict is keyed by the table names and the inner dicts are keyed by 'parameters' and the slot variable names.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v1.py", + "ast_data": "FunctionDef name:_create_variables_and_slots arg:self arguments arg Assign For Assign Call Return return:yes" + }, + { + "library": "django", + "name": "_create_index_name", + "source_code": "def _create_index_name(self, table_name, column_names, suffix=''):\n _, table_name = split_identifier(table_name)\n hash_suffix_part = '%s%s' % (names_digest(table_name, *column_names, length=8), suffix)\n max_length = self.connection.ops.max_name_length() or 200\n index_name = '%s_%s_%s' % (table_name, '_'.join(column_names), hash_suffix_part)\n if len(index_name) <= max_length:\n return index_name\n if len(hash_suffix_part) > max_length / 3:\n hash_suffix_part = hash_suffix_part[:max_length // 3]\n other_length = (max_length - len(hash_suffix_part)) // 2 - 1\n index_name = '%s_%s_%s' % (table_name[:other_length], '_'.join(column_names)[:other_length], hash_suffix_part)\n if index_name[0] == '_' or index_name[0].isdigit():\n index_name = 'D%s' % index_name[:-1]\n return index_name", + "docstring": "Generate a unique name for an index/unique constraint. The name is divided into 3 parts: the table name, the column names, and a unique digest and suffix.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\schema.py", + "ast_data": "FunctionDef name:_create_index_name arg:self arg:table_name arg:column_names arg:suffix arguments arg arg arg arg Assign Call Assign Call Assign BoolOp Call Assign Call If Compare Call Return return:yes If Compare Call Assign Assign Call Assign Call If BoolOp Compare Call Assign Return return:yes" + }, + { + "library": "matplotlib", + "name": "_connect_picklable", + "source_code": "def _connect_picklable(self, signal, func):\n cid = self.connect(signal, func)\n self._pickled_cids.add(cid)\n return cid", + "docstring": "Like , but the callback is kept when pickling/unpickling. 
Currently internal-use only.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:_connect_picklable arg:self arg:signal arg:func arguments arg arg arg Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_TfDeviceCaptureOp", + "source_code": "class _TfDeviceCaptureOp:\n\n def __init__(self):\n self.device = None\n\n def _set_device(self, device):\n if isinstance(device, device_spec.DeviceSpecV2):\n device = device.to_string()\n self.device = device\n\n def _set_device_from_string(self, device_str):\n self.device = device_str", + "docstring": "Class for capturing the TF device scope.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "ClassDef name:_TfDeviceCaptureOp FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:_set_device arg:self arg:device arguments arg arg If Call Assign Call Assign FunctionDef name:_set_device_from_string arg:self arg:device_str arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "_write_op_list_section", + "source_code": "def _write_op_list_section(self, graph_order):\n self._write_report('%s %s\\n' % (_MARKER_SECTION_BEGIN, _SECTION_NAME_OP_LIST))\n self._write_report('%s %d\\n' % (_FIELD_NAME_NUM_OPS, len(graph_order.operations)))\n for i in range(0, len(graph_order.operations)):\n op = graph_order.operations[i]\n line = '%d \"%s\" %s' % (i, op.name, op.type)\n for out_tensor in op.outputs:\n if out_tensor.name not in graph_order.tensor_to_idx:\n raise ValueError('out_tensor is not in tensor_to_idx. out_tensor={}, tensor_to_idx={}'.format(out_tensor.name, graph_order.tensor_to_idx))\n line += ' %d' % graph_order.tensor_to_idx[out_tensor.name]\n line += '\\n'\n self._write_report(line)\n self._write_report('%s %s\\n' % (_MARKER_SECTION_END, _SECTION_NAME_OP_LIST))", + "docstring": "Writes the Op-list section of the report.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py", + "ast_data": "FunctionDef name:_write_op_list_section arg:self arg:graph_order arguments arg arg Call Call Call For Call Call Assign Assign For If Compare Raise Call Call Call Call" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self, bus, httpserver=None, bind_addr=None):\n self.bus = bus\n self.httpserver = httpserver\n self.bind_addr = bind_addr\n self.interrupt = None\n self.running = False", + "docstring": "Initialize the HTTP server plugin.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\process\\servers.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:bus arg:httpserver arg:bind_addr arguments arg arg arg arg Assign Assign Assign Assign Assign" + }, + { + "library": "tensorflow", + "name": "_parse_input_meta_graph_proto", + "source_code": "def _parse_input_meta_graph_proto(input_graph: str, input_binary: bool) -> meta_graph_pb2.MetaGraphDef:\n if not gfile.Exists(input_graph):\n raise IOError(\"Input meta graph file '\" + input_graph + \"' does not exist!\")\n input_meta_graph_def = meta_graph_pb2.MetaGraphDef()\n mode = 'rb' if input_binary else 'r'\n with gfile.GFile(input_graph, mode) as f:\n if input_binary:\n input_meta_graph_def.ParseFromString(f.read())\n else:\n text_format.Merge(f.read(), input_meta_graph_def)\n print(\"Loaded meta graph file '\" + input_graph)\n return input_meta_graph_def", + "docstring": "Parses input tensorflow graph into MetaGraphDef proto.", + "type": "function", + 
"file_path": "tensorflow\\tensorflow\\python\\tools\\freeze_graph.py", + "ast_data": "FunctionDef name:_parse_input_meta_graph_proto arg:input_graph arg:input_binary arguments arg arg If Call Raise Call Assign Call Assign With Call If Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "do_not_descend_map", + "source_code": "@property\ndef do_not_descend_map(self):\n return self._do_not_descend_map", + "docstring": "A map from parents to symbols that should not be descended into. This map can be edited, but it should not be edited once traversal has begun. Returns: The map marking symbols to not explore.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\common\\public_api.py", + "ast_data": "FunctionDef name:do_not_descend_map arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_radius", + "source_code": "def get_radius(self):\n return self.width / 2.0", + "docstring": "Return the radius of the circle.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:get_radius arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "save_module", + "source_code": "def save_module(self, module_name: str, dependencies=True):\n if not isinstance(module_name, str):\n raise TypeError('save_module() expects a string input, did you perhaps mean to pass `__name__`?')\n self._intern_module(module_name, dependencies)", + "docstring": "Save the code for ``, we scan the source for dependencies.", + "type": "method", + "file_path": "pytorch\\torch\\package\\package_exporter.py", + "ast_data": "FunctionDef name:save_module arg:self arg:module_name arg:dependencies arguments arg arg arg If Call Raise Call Call" + }, + { + "library": "django", + "name": "__init__", + "source_code": "def __init__(self, *args, **kwargs):\n if len(args) == 1:\n if isinstance(args[0], (tuple, list)):\n init_geoms = args[0]\n else:\n init_geoms = args\n else:\n init_geoms = args\n self._check_allowed(init_geoms)\n collection = self._create_collection(len(init_geoms), init_geoms)\n super().__init__(collection, **kwargs)", + "docstring": "Initialize a Geometry Collection from a sequence of Geometry objects.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\collections.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg If Compare Call If Call Assign Assign Assign Call Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "rejection_resample", + "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Dataset.rejection_resample(...)`.')\n@tf_export('data.experimental.rejection_resample')\ndef rejection_resample(class_func, target_dist, initial_dist=None, seed=None):\n\n def _apply_fn(dataset):\n return dataset.rejection_resample(class_func=class_func, target_dist=target_dist, initial_dist=initial_dist, seed=seed)\n return _apply_fn", + "docstring": "A transformation that resamples a dataset to achieve a target distribution. **NOTE** Resampling is performed via rejection sampling; some fraction of the input values will be dropped. Args: class_func: A function mapping an element of the input dataset to a scalar tensor. Values should be in . target_dist: A floating point type tensor, shaped . initial_dist: (Optional.) A floating point type tensor, shaped . If not provided, the true class distribution is estimated live in a streaming fashion. seed: (Optional.) 
Python integer seed for the resampler. Returns: A transformation function, which can be passed to .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\resampling.py", + "ast_data": "FunctionDef name:rejection_resample arg:class_func arg:target_dist arg:initial_dist arg:seed arguments arg arg arg arg FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "impulse", + "source_code": "def impulse(system, X0=None, T=None, N=None):\n if isinstance(system, lti):\n sys = system._as_ss()\n elif isinstance(system, dlti):\n raise AttributeError('impulse can only be used with continuous-time systems.')\n else:\n sys = lti(*system)._as_ss()\n if X0 is None:\n X = squeeze(sys.B)\n else:\n X = squeeze(sys.B + X0)\n if N is None:\n N = 100\n if T is None:\n T = _default_response_times(sys.A, N)\n else:\n T = asarray(T)\n _, h, _ = lsim(sys, 0.0, T, X, interp=False)\n return (T, h)", + "docstring": "Impulse response of continuous-time system. Parameters ---------- system : an instance of the LTI class or a tuple of array_like describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of ) * 2 (num, den) * 3 (zeros, poles, gain) * 4 (A, B, C, D) X0 : array_like, optional Initial state-vector. Defaults to zero. T : array_like, optional Time points. Computed if not given. N : int, optional The number of time points to compute (if is not given). Returns ------- T : ndarray A 1-D array of time points. yout : ndarray A 1-D array containing the impulse response of the system (except for singularities at zero). Notes ----- If (num, den) is passed in for `` >>> from scipy import signal >>> system = ([1.0], [1.0, 2.0, 1.0]) >>> t, y = signal.impulse(system) >>> import matplotlib.pyplot as plt >>> plt.plot(t, y)", + "type": "function", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:impulse arg:system arg:X0 arg:T arg:N arguments arg arg arg arg If Call Assign Call If Call Raise Call Assign Call Call If Compare Assign Call Assign Call If Compare Assign If Compare Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "dtype", + "source_code": "@property\ndef dtype(self):\n return self._row_splits.dtype", + "docstring": "The used to encode the row partition (either int32 or int64).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", + "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "connect", + "source_code": "@async_unsafe\ndef connect(self):\n self.check_settings()\n self.in_atomic_block = False\n self.savepoint_ids = []\n self.atomic_blocks = []\n self.needs_rollback = False\n self.health_check_enabled = self.settings_dict['CONN_HEALTH_CHECKS']\n max_age = self.settings_dict['CONN_MAX_AGE']\n self.close_at = None if max_age is None else time.monotonic() + max_age\n self.closed_in_transaction = False\n self.errors_occurred = False\n self.health_check_done = True\n conn_params = self.get_connection_params()\n self.connection = self.get_new_connection(conn_params)\n self.set_autocommit(self.settings_dict['AUTOCOMMIT'])\n self.init_connection_state()\n connection_created.send(sender=self.__class__, connection=self)\n self.run_on_commit = []", + "docstring": "Connect to the database. 
Assume that the connection is closed.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\base.py", + "ast_data": "FunctionDef name:connect arg:self arguments arg Call Assign Assign Assign Assign Assign Assign Assign Compare Call Assign Assign Assign Assign Call Assign Call Call Call Call Assign" + }, + { + "library": "tensorflow", + "name": "extend", + "source_code": "def extend(self, values):\n for value in values:\n self.append(value)", + "docstring": "Add a sequence of trackable values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py", + "ast_data": "FunctionDef name:extend arg:self arg:values arguments arg arg For Call" + }, + { + "library": "kornia", + "name": "distort_points_kannala_brandt", + "source_code": "def distort_points_kannala_brandt(projected_points_in_camera_z1_plane: Tensor, params: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(projected_points_in_camera_z1_plane, ['*', '2'])\n KORNIA_CHECK_SHAPE(params, ['*', '8'])\n x = projected_points_in_camera_z1_plane[..., 0]\n y = projected_points_in_camera_z1_plane[..., 1]\n radius_sq = x ** 2 + y ** 2\n distorted_points = ops.where(radius_sq[..., None] > 1e-08, _distort_points_kannala_brandt_impl(projected_points_in_camera_z1_plane, params), distort_points_affine(projected_points_in_camera_z1_plane, params[..., :4]))\n return distorted_points", + "docstring": "Distort points from the canonical z=1 plane into the camera frame using the Kannala-Brandt model. Args: projected_points_in_camera_z1_plane: Tensor representing the points to distort with shape (..., 2). params: Tensor representing the parameters of the Kannala-Brandt distortion model with shape (..., 8). Returns: Tensor representing the distorted points with shape (..., 2). Example: >>> points = torch.tensor([319.5, 239.5]) # center of a 640x480 image >>> params = torch.tensor([1000.0, 1000.0, 320.0, 280.0, 0.1, 0.01, 0.001, 0.0001]) >>> distort_points_kannala_brandt(points, params) tensor([1982.6832, 1526.3619])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\camera\\distortion_kannala_brandt.py", + "ast_data": "FunctionDef name:distort_points_kannala_brandt arg:projected_points_in_camera_z1_plane arg:params arguments arg arg Call Call Assign Assign Assign Assign Call Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "set_as_default", + "source_code": "def set_as_default(self, step=None):\n self.as_default(step).__enter__()", + "docstring": "Enables this summary writer for the current thread. For convenience, if is not None, this function also sets a default value for the parameter used in summary-writing functions elsewhere in the API so that it need not be explicitly passed in every such invocation. The value can be a constant or a variable. Note: when setting in a @tf.function, the step value will be captured at the time the function is traced, so changes to the step outside the function will not be reflected inside the function unless using a step. Args: step: An -castable default step value, or . When not , the current step is modified to the given value. 
When , the current step is not modified.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "FunctionDef name:set_as_default arg:self arg:step arguments arg arg Call Call" + }, + { + "library": "pytorch", + "name": "VarDispatcher", + "source_code": "class VarDispatcher(Dispatcher):\n\n def __call__(self, *args, **kwargs):\n func, s = self.resolve(args)\n d = {k.token: v for k, v in s.items()}\n return func(**d)", + "docstring": "A dispatcher that calls functions with variable names >>> # xdoctest: +SKIP >>> d = VarDispatcher(\"d\") >>> x = var(\"x\") >>> @d.register(\"inc\", x) ... def f(x): ... return x + 1 >>> @d.register(\"double\", x) ... def f(x): ... return x * 2 >>> d(\"inc\", 10) 11 >>> d(\"double\", 10) 20", + "type": "class", + "file_path": "pytorch\\torch\\fx\\experimental\\unification\\match.py", + "ast_data": "ClassDef name:VarDispatcher FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "fprati", + "source_code": "def fprati(p1, f1, p2, f2, p3, f3):\n h1 = f1 * (f2 - f3)\n h2 = f2 * (f3 - f1)\n h3 = f3 * (f1 - f2)\n if p3 == np.inf:\n return -(p2 * h1 + p1 * h2) / h3\n return -(p1 * p2 * h3 + p2 * p3 * h1 + p1 * p3 * h2) / (p1 * h1 + p2 * h2 + p3 * h3)", + "docstring": "The root of r(p) = (u*p + v) / (p + w) given three points and values, (p1, f2), (p2, f2) and (p3, f3). The FITPACK analog adjusts the bounds, and we do not NB: FITPACK uses p < 0 to encode p=infinity. We just use the infinity itself. Since the bracket is ``).", + "type": "function", + "file_path": "scipy\\scipy\\interpolate\\_fitpack_repro.py", + "ast_data": "FunctionDef name:fprati arg:p1 arg:f1 arg:p2 arg:f2 arg:p3 arg:f3 arguments arg arg arg arg arg arg Assign Assign Assign If Compare Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "index_to_coordinate", + "source_code": "def index_to_coordinate(index, extent, origin):\n left, right, bottom, top = extent\n hshift = 0.5 * np.sign(right - left)\n left, right = (left + hshift, right - hshift)\n vshift = 0.5 * np.sign(top - bottom)\n bottom, top = (bottom + vshift, top - vshift)\n if origin == 'upper':\n bottom, top = (top, bottom)\n return {'[0, 0]': (left, bottom), \"[M', 0]\": (left, top), \"[0, N']\": (right, bottom), \"[M', N']\": (right, top)}[index]", + "docstring": "Return the pixel center of an index.", + "type": "function", + "file_path": "matplotlib\\galleries\\users_explain\\artists\\imshow_extent.py", + "ast_data": "FunctionDef name:index_to_coordinate arg:index arg:extent arg:origin arguments arg arg arg Assign Assign Call Assign Assign Call Assign If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "read", + "source_code": "def read(self, filename: str) -> None:\n with open(filename, mode='r', encoding='utf-8') as f:\n data = json.load(f)\n self._docs.update((ExportedDoc.create(**d) for d in data['docs']))\n self._symbols.update((ExportedSymbol.create(**s) for s in data['symbols']))", + "docstring": "Reads exports from filename.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\shared\\exported_api.py", + "ast_data": "FunctionDef name:read arg:self arg:filename arguments arg arg With Call Assign Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "ConcreteFunctionGarbageCollector", + "source_code": "class ConcreteFunctionGarbageCollector:\n __slots__ = 
['_func_graph']\n\n def __init__(self, func_graph):\n self._func_graph = func_graph\n\n def release(self):\n self._func_graph = None\n\n def __del__(self):\n if func_graph_module is None or self._func_graph is None:\n return\n try:\n func_graph_module.dismantle_func_graph(self._func_graph)\n except:\n pass", + "docstring": "Cleans up reference cycles when a goes out of scope.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "ClassDef name:ConcreteFunctionGarbageCollector Assign FunctionDef name:__init__ arg:self arg:func_graph arguments arg arg Assign FunctionDef name:release arg:self arguments arg Assign FunctionDef name:__del__ arg:self arguments arg If BoolOp Compare Compare Return return:no Try Call ExceptHandler" + }, + { + "library": "tensorflow", + "name": "_update_forward_compatibility_date_number", + "source_code": "def _update_forward_compatibility_date_number(date_to_override=None):\n global _FORWARD_COMPATIBILITY_DATE_NUMBER\n if date_to_override:\n date = date_to_override\n else:\n date = _FORWARD_COMPATIBILITY_HORIZON\n delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)\n if delta_days:\n date += datetime.timedelta(days=int(delta_days))\n if date < _FORWARD_COMPATIBILITY_HORIZON:\n logging.warning('Trying to set the forward compatibility date to the past date %s. This will be ignored by TensorFlow.' % date)\n return\n _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(date.year, date.month, date.day)", + "docstring": "Update the base date to compare in forward_compatible function.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\compat\\compat.py", + "ast_data": "FunctionDef name:_update_forward_compatibility_date_number arg:date_to_override arguments arg If Assign Assign Assign Call If Call Call If Compare Call Return return:no Assign Call" + }, + { + "library": "scikit-learn", + "name": "_process_decision_function", + "source_code": "def _process_decision_function(*, y_pred, target_type, classes, pos_label):\n if target_type == 'binary' and pos_label == classes[0]:\n return -1 * y_pred\n return y_pred", + "docstring": "Get the response values when the response method is . This function process the array in the binary and multi-label cases. In the binary case, it inverts the sign of the score if the positive label is not . In the multi-label case, it stacks the predictions if they are not in the \"compressed\" format . Parameters ---------- y_pred : ndarray Output of . The shape depends on the target type: - for binary classification, it is a 1d array of shape where the sign is assuming that is the positive class; - for multiclass classification, it is a 2d array of shape ; - for multilabel classification, it is a 2d array of shape . target_type : {\"binary\", \"multiclass\", \"multilabel-indicator\"} Type of the target. classes : ndarray of shape (n_classes,) or list of such arrays Class labels as reported by . pos_label : int, float, bool or str Only used with binary and multiclass targets. 
Returns ------- y_pred : ndarray of shape (n_samples,), (n_samples, n_classes) or (n_samples, n_output) Compressed predictions format as requested by the metrics.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_response.py", + "ast_data": "FunctionDef name:_process_decision_function arguments arg arg arg arg If BoolOp Compare Compare Return return:yes Return return:yes" + }, + { + "library": "scikit-learn", + "name": "get_meson_info", + "source_code": "def get_meson_info():\n build_path = Path('build/introspect')\n subprocess.check_call(['meson', 'setup', build_path, '--reconfigure'])\n json_out = subprocess.check_output(['meson', 'introspect', build_path, '--targets'], text=True)\n target_list = json.loads(json_out)\n meson_targets = [target for target in target_list if has_openmp_flags(target)]\n return [get_canonical_name_meson(each, build_path) for each in meson_targets]", + "docstring": "Return names of extension that use OpenMP based on meson introspect output. The meson introspect json info is a list of targets where a target is a dict that looks like this (parts not used in this script are not shown for simplicity): { 'name': '_k_means_elkan.cpython-312-x86_64-linux-gnu', 'filename': [ '/sklearn/cluster/_k_means_elkan.cpython-312-x86_64-linux-gnu.so' ], 'target_sources': [ { 'compiler': ['ccache', 'cc'], 'parameters': [ '-Wall', '-std=c11', '-fopenmp', ... ], ... }, { 'linker': ['cc'], 'parameters': [ '-shared', '-fPIC', '-fopenmp', ... ] } ] }", + "type": "function", + "file_path": "scikit-learn\\build_tools\\check-meson-openmp-dependencies.py", + "ast_data": "FunctionDef name:get_meson_info arguments Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "Concat", + "source_code": "class Concat(Func):\n function = None\n template = '%(expressions)s'\n\n def __init__(self, *expressions, **extra):\n if len(expressions) < 2:\n raise ValueError('Concat must take at least two expressions')\n paired = self._paired(expressions, output_field=extra.get('output_field'))\n super().__init__(paired, **extra)\n\n def _paired(self, expressions, output_field):\n if len(expressions) == 2:\n return ConcatPair(*expressions, output_field=output_field)\n return ConcatPair(expressions[0], self._paired(expressions[1:], output_field=output_field), output_field=output_field)", + "docstring": "Concatenate text fields together. 
Backends that result in an entire null expression when any arguments are null will wrap each argument in coalesce functions to ensure a non-null result.", + "type": "class", + "file_path": "django\\django\\db\\models\\functions\\text.py", + "ast_data": "ClassDef name:Concat Assign Assign FunctionDef name:__init__ arg:self arguments arg arg arg If Compare Call Raise Call Assign Call Call Call Call FunctionDef name:_paired arg:self arg:expressions arg:output_field arguments arg arg arg If Compare Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self, dispatch_method_name=None, translate=punctuation_to_underscores):\n validate_translator(translate)\n self.translate = translate\n if dispatch_method_name:\n self.dispatch_method_name = dispatch_method_name", + "docstring": "Initialize the HTTP request dispatcher.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cpdispatch.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:dispatch_method_name arg:translate arguments arg arg arg Call Assign If Assign" + }, + { + "library": "scipy", + "name": "write", + "source_code": "def write(self, arr):\n mat_tag_pos = self.file_stream.tell()\n if scipy.sparse.issparse(arr):\n self.write_sparse(arr)\n self.update_matrix_tag(mat_tag_pos)\n return\n narr = to_writeable(arr)\n if narr is None:\n raise TypeError(f'Could not convert {arr} (type {type(arr)}) to array')\n if isinstance(narr, MatlabObject):\n self.write_object(narr)\n elif isinstance(narr, MatlabFunction):\n raise MatWriteError('Cannot write matlab functions')\n elif narr is EmptyStructMarker:\n self.write_empty_struct()\n elif narr.dtype.fields:\n self.write_struct(narr)\n elif narr.dtype.hasobject:\n self.write_cells(narr)\n elif narr.dtype.kind in ('U', 'S'):\n if self.unicode_strings:\n codec = 'UTF8'\n else:\n codec = 'ascii'\n self.write_char(narr, codec)\n else:\n self.write_numeric(narr)\n self.update_matrix_tag(mat_tag_pos)", + "docstring": "Write to stream at top and sub levels Parameters ---------- arr : array_like array-like object to create writer for", + "type": "method", + "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py", + "ast_data": "FunctionDef name:write arg:self arg:arr arguments arg arg Assign Call If Call Call Call Return return:no Assign Call If Compare Raise Call Call If Call Call If Call Raise Call If Compare Call If Call If Call If Compare If Assign Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "mask_loads", + "source_code": "@contextlib.contextmanager\ndef mask_loads(self, mask: Union[str, OpsWrapper], value: Union[int, float]) -> Iterator[str]:\n prior = self._load_mask\n prior_val = self._load_other\n if prior:\n mask = ops.logical_and(mask, prior)\n mask = OpsWrapper._unwrap(mask)\n self._load_mask = mask\n self._load_other = value\n try:\n yield mask\n finally:\n self._load_mask = prior\n self._load_other = prior_val", + "docstring": "Context manager to add an additional mask to tl.load/store", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py", + "ast_data": "FunctionDef name:mask_loads arg:self arg:mask arg:value arguments arg arg arg Assign Assign If Assign Call Assign Call Assign Assign Try Assign Assign" + }, + { + "library": "tensorflow", + "name": "can_ignore", + "source_code": "def can_ignore(self, node):\n ast_node = node.ast_node\n if anno.hasanno(ast_node, anno.Basic.SKIP_PROCESSING):\n return True\n return isinstance(ast_node, (gast.Break, 
gast.Continue, gast.Raise, gast.Pass))", + "docstring": "Returns True if the node can safely be assumed not to touch variables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py", + "ast_data": "FunctionDef name:can_ignore arg:self arg:node arguments arg arg Assign If Call Return return:yes Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_mutation_aspect", + "source_code": "def get_mutation_aspect(self):\n return self._mutation_aspect if self._mutation_aspect is not None else 1", + "docstring": "Return the aspect ratio of the bbox mutation.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:get_mutation_aspect arg:self arguments arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "cluster_spec", + "source_code": "@abc.abstractmethod\ndef cluster_spec(self):\n raise NotImplementedError()", + "docstring": "Retrieve the current state of the cluster and return a . Returns: A representing the state of the cluster at the moment this function is called. Implementors of this function must take care in ensuring that the ClusterSpec returned is up-to-date at the time of calling this function. This usually means retrieving the information from the underlying cluster management system every time this function is invoked and reconstructing a cluster_spec, rather than attempting to cache anything.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py", + "ast_data": "FunctionDef name:cluster_spec arg:self arguments arg Raise Call" + }, + { + "library": "django", + "name": "add_arguments", + "source_code": "def add_arguments(self, parser):\n pass", + "docstring": "Entry point for subclassed commands to add custom arguments.", + "type": "method", + "file_path": "django\\django\\core\\management\\base.py", + "ast_data": "FunctionDef name:add_arguments arg:self arg:parser arguments arg arg" + }, + { + "library": "numpy", + "name": "symbols", + "source_code": "def symbols(self):\n found = set()\n\n def visit(expr, found=found):\n if expr.op is Op.SYMBOL:\n found.add(expr)\n self.traverse(visit)\n return found", + "docstring": "Return a set of symbols contained in self.", + "type": "method", + "file_path": "numpy\\numpy\\f2py\\symbolic.py", + "ast_data": "FunctionDef name:symbols arg:self arguments arg Assign Call FunctionDef name:visit arg:expr arg:found arguments arg arg If Compare Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "write", + "source_code": "def write(self, file_path: str | Path) -> None:\n data = self.data\n if self.channels_order == ChannelsOrder.CHANNELS_LAST:\n data = data.permute(2, 0, 1)\n write_image(file_path, data)", + "docstring": "Write the image to a file. For now, only support writing to JPEG format. Args: file_path: the path to the file to write the image to. 
Example: >>> data = np.ones((4, 5, 3), dtype=np.uint8) # HxWxC >>> img = Image.from_numpy(data) >>> img.write(\"test.jpg\")", + "type": "method", + "file_path": "kornia\\kornia\\image\\image.py", + "ast_data": "FunctionDef name:write arg:self arg:file_path arguments arg arg Assign If Compare Assign Call Call" + }, + { + "library": "scikit-learn", + "name": "load_diabetes", + "source_code": "@validate_params({'return_X_y': ['boolean'], 'as_frame': ['boolean'], 'scaled': ['boolean']}, prefer_skip_nested_validation=True)\ndef load_diabetes(*, return_X_y=False, as_frame=False, scaled=True):\n data_filename = 'diabetes_data_raw.csv.gz'\n target_filename = 'diabetes_target.csv.gz'\n data = load_gzip_compressed_csv_data(data_filename)\n target = load_gzip_compressed_csv_data(target_filename)\n if scaled:\n data = scale(data, copy=False)\n data /= data.shape[0] ** 0.5\n fdescr = load_descr('diabetes.rst')\n feature_names = ['age', 'sex', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6']\n frame = None\n target_columns = ['target']\n if as_frame:\n frame, data, target = _convert_data_dataframe('load_diabetes', data, target, feature_names, target_columns)\n if return_X_y:\n return (data, target)\n return Bunch(data=data, target=target, frame=frame, DESCR=fdescr, feature_names=feature_names, data_filename=data_filename, target_filename=target_filename, data_module=DATA_MODULE)", + "docstring": "Load and return the diabetes dataset (regression). ============== ================== Samples total 442 Dimensionality 10 Features real, -.2 datatargetreturn_X_ydatatargetn_samples~sklearn.utils.Bunchas_frame=Truedataas_frame=Truetargetas_frame=Truedatatarget` is True Returns a tuple of two ndarray of shape (n_samples, n_features) A 2D array with each row representing one sample and each column representing the features and/or target of a given sample. .. versionadded:: 0.18 Examples -------- >>> from sklearn.datasets import load_diabetes >>> diabetes = load_diabetes() >>> diabetes.target[:3] array([151., 75., 141.]) >>> diabetes.data.shape (442, 10)", + "type": "function", + "file_path": "scikit-learn\\sklearn\\datasets\\_base.py", + "ast_data": "FunctionDef name:load_diabetes arguments arg arg arg Assign Assign Assign Call Assign Call If Assign Call Assign Call Assign Assign Assign If Assign Call If Return return:yes Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "reduce_sum", + "source_code": "def reduce_sum(tensors):\n return _apply_reduce('sum', tensors)", + "docstring": "Returns a tensor with the reduce sum across . The computation is done with a reduce operation, so only one tensor is returned. Args: tensors: The input tensors across which to sum; must be assigned to GPU devices. Returns: A tensor containing the sum of the input tensors. Raises: LookupError: If context is not currently using a GPU device.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nccl_ops.py", + "ast_data": "FunctionDef name:reduce_sum arg:tensors arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "new_func", + "source_code": "@functools.wraps(func)\ndef new_func(*args, **kwargs):\n if args:\n raise ValueError(f'The function {func.__name__} only accepts keyword arguments. Do not pass positional arguments. 
Received the following positional arguments: {args}')\n return func(**kwargs)", + "docstring": "Keyword args only wrapper.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\keyword_args.py", + "ast_data": "FunctionDef name:new_func arguments arg arg If Raise Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "parse", + "source_code": "@staticmethod\ndef parse(s, language=Language.C):\n return fromstring(s, language=language)", + "docstring": "Parse a Fortran expression to a Expr.", + "type": "method", + "file_path": "numpy\\numpy\\f2py\\symbolic.py", + "ast_data": "FunctionDef name:parse arg:s arg:language arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "compute_graph_partition_maps", + "source_code": "def compute_graph_partition_maps(self, signatures: list[GraphPartitionSignature]) -> None:\n name_to_graph_input_index = {name: idx for idx, name in enumerate(V.graph.graph_inputs)}\n name_to_graph_output_index = {name: idx for idx, name in enumerate(V.graph.get_output_names())}\n V.graph.partition_maps = []\n for partition_id, signature in enumerate(signatures):\n if signature.skip_cudagraph:\n continue\n input_mapping = []\n for name in signature.input_nodes:\n input_mapping.append(name_to_graph_input_index.get(name))\n output_mapping = []\n for node in signature.output_nodes:\n output_mapping.append(name_to_graph_output_index.get(node.get_name()))\n V.graph.partition_maps.append(GraphPartitionMap(partition_id, input_mapping, output_mapping, signature.constant_names))", + "docstring": "computes a mapping from partition input/output indices to graph input/output indices for each partition.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:compute_graph_partition_maps arg:self arg:signatures arguments arg arg Assign Call Assign Call Call Assign For Call If Assign For Call Call Assign For Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "entry_to_pretty_str", + "source_code": "def entry_to_pretty_str(entry) -> str:\n s = '{\\n'\n if 'pattern' in entry:\n pattern_str = pattern_to_human_readable(entry['pattern'])\n s += f\" 'pattern': {pattern_str},\\n\"\n if 'dtype_configs' in entry:\n s += \" 'dtype_configs': [\\n\"\n for dtype_config in entry['dtype_configs']:\n s += ' {\\n'\n for k, v in dtype_config.items():\n s += f\" '{k}': {v},\\n\"\n s += ' },\\n'\n s += ' ],\\n'\n if 'num_tensor_args_to_observation_type' in entry:\n s += \" 'num_tensor_args_to_observation_type': {\\n\"\n for k, v in entry['num_tensor_args_to_observation_type'].items():\n s += f' {k}: {v},\\n'\n s += ' },\\n'\n custom_handled_fields = ['pattern', 'dtype_configs', 'num_tensor_args_to_observation_type']\n for field_name in entry:\n if field_name in custom_handled_fields:\n continue\n s += f\" '{field_name}': {entry[field_name]},\\n\"\n s += '}'\n return s", + "docstring": "Given a backend_config_dict entry, returns a string with the human readable representation of it.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\utils.py", + "ast_data": "FunctionDef name:entry_to_pretty_str arg:entry arguments arg Assign If Compare Assign Call If Compare For For Call If Compare For Call Assign For If Compare Return return:yes" + }, + { + "library": "pytorch", + "name": "register_torch_dispatch", + "source_code": "def register_torch_dispatch(op: _op_identifier, torch_dispatch_class: Any, func: Optional[Callable]=None, /, *, 
lib: Optional[Library]=None):\n if not isinstance(op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef)):\n raise ValueError(f'register_torch_dispatch({op}): got unexpected type for op: {type(op)}')\n if isinstance(op, torch._ops.OpOverload):\n op = op._name\n opdef = _maybe_get_opdef(op)\n if opdef is not None:\n return opdef.register_torch_dispatch(torch_dispatch_class, func)\n assert isinstance(op, str)\n\n def register(func):\n namespace, op_name = torch._library.utils.parse_namespace(op)\n if lib is None:\n use_lib = Library(namespace, 'FRAGMENT')\n _keep_alive.append(use_lib)\n else:\n use_lib = lib\n use_lib._register_torch_dispatch_rule(op_name, torch_dispatch_class, func)\n return func\n if func is None:\n return register\n else:\n return register(func)", + "docstring": "Registers a torch_dispatch rule for the given operator and `torch-dispatch-calling-convention`). Examples: >>> import torch >>> >>> @torch.library.custom_op(\"mylib::foo\", mutates_args={}) >>> def foo(x: torch.Tensor) -> torch.Tensor: >>> return x.clone() >>> >>> class MyMode(torch.utils._python_dispatch.TorchDispatchMode): >>> def __torch_dispatch__(self, func, types, args=(), kwargs=None): >>> return func(*args, **kwargs) >>> >>> @torch.library.register_torch_dispatch(\"mylib::foo\", MyMode) >>> def _(mode, func, types, args, kwargs): >>> x, = args >>> return x + 1 >>> >>> x = torch.randn(3) >>> y = foo(x) >>> assert torch.allclose(y, x) >>> >>> with MyMode(): >>> y = foo(x) >>> assert torch.allclose(y, x + 1)", + "type": "function", + "file_path": "pytorch\\torch\\library.py", + "ast_data": "FunctionDef name:register_torch_dispatch arguments arg arg arg arg If Call Raise Call Call If Call Assign Assign Call If Compare Return return:yes Call Call FunctionDef name:register arg:func arguments arg Assign Call If Compare Assign Call Call Assign Call Return return:yes If Compare Return return:yes Return return:yes Call" + }, + { + "library": "numpy", + "name": "BlasILP64NotFoundError", + "source_code": "class BlasILP64NotFoundError(NotFoundError):\n pass", + "docstring": "64-bit Blas libraries not found. Known libraries in numpy/distutils/site.cfg file are: openblas64_, openblas_ilp64", + "type": "class", + "file_path": "numpy\\numpy\\distutils\\system_info.py", + "ast_data": "ClassDef name:BlasILP64NotFoundError" + }, + { + "library": "cryptography", + "name": "public_bytes", + "source_code": "@abc.abstractmethod\ndef public_bytes(self, encoding: _serialization.Encoding, format: _serialization.PublicFormat) -> bytes:\n pass", + "docstring": "The serialized bytes of the public key.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x25519.py", + "ast_data": "FunctionDef name:public_bytes arg:self arg:encoding arg:format arguments arg arg arg" + }, + { + "library": "scipy", + "name": "fftn", + "source_code": "def fftn(x, shape=None, axes=None, overwrite_x=False):\n shape = _good_shape(x, shape, axes)\n return _pocketfft.fftn(x, shape, axes, None, overwrite_x)", + "docstring": "Return multidimensional discrete Fourier transform. The returned array contains:: y[j_1,..,j_d] = sum[k_1=0..n_1-1, ..., k_d=0..n_d-1] x[k_1,..,k_d] * prod[i=1..d] exp(-sqrt(-1)*2*pi/n_i * j_i * k_i) where d = len(x.shape) and n = x.shape. Parameters ---------- x : array_like The (N-D) array to transform. shape : int or array_like of ints or None, optional The shape of the result. 
If both and (see below) are None, is `shapeaxesshape`shape[i] >> import numpy as np >>> from scipy.fftpack import fftn, ifftn >>> y = (-np.arange(16), 8 - np.arange(16), np.arange(16)) >>> np.allclose(y, fftn(ifftn(y))) True", + "type": "function", + "file_path": "scipy\\scipy\\fftpack\\_basic.py", + "ast_data": "FunctionDef name:fftn arg:x arg:shape arg:axes arg:overwrite_x arguments arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "revert_proxy_model_permissions", + "source_code": "def revert_proxy_model_permissions(apps, schema_editor):\n update_proxy_model_permissions(apps, schema_editor, reverse=True)", + "docstring": "Update the content_type of proxy model permissions to use the ContentType of the concrete model.", + "type": "function", + "file_path": "django\\django\\contrib\\auth\\migrations\\0011_update_proxy_permissions.py", + "ast_data": "FunctionDef name:revert_proxy_model_permissions arg:apps arg:schema_editor arguments arg arg Call" + }, + { + "library": "tensorflow", + "name": "_rename_function", + "source_code": "def _rename_function(f, arg_num, name):\n func_code = f.__code__\n new_code = func_code.replace(co_argcount=arg_num, co_name=name)\n return types.FunctionType(new_code, f.__globals__, name, f.__defaults__, f.__closure__)", + "docstring": "Rename the given function's name appears in the stack trace.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\platform\\benchmark.py", + "ast_data": "FunctionDef name:_rename_function arg:f arg:arg_num arg:name arguments arg arg arg Assign Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "operator", + "source_code": "@property\ndef operator(self) -> 'LinearOperatorInversion':\n return self._operator", + "docstring": "The operator before inversion.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_inversion.py", + "ast_data": "FunctionDef name:operator arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_data_mask", + "source_code": "def _data_mask(self):\n num_rows, num_cols = self.shape\n offset_inds = np.arange(self.data.shape[1])\n row = offset_inds - self.offsets[:, None]\n mask = row >= 0\n mask &= row < num_rows\n mask &= offset_inds < num_cols\n return mask", + "docstring": "Returns a mask of the same shape as self.data, where mask[i,j] is True when data[i,j] corresponds to a stored element.", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_dia.py", + "ast_data": "FunctionDef name:_data_mask arg:self arguments arg Assign Assign Call Assign Assign Compare Compare Compare Return return:yes" + }, + { + "library": "pygame", + "name": "initsysfonts_darwin", + "source_code": "def initsysfonts_darwin():\n fonts = {}\n fclist_locations = ['/usr/X11/bin/fc-list', '/usr/X11R6/bin/fc-list']\n for bin_location in fclist_locations:\n if exists(bin_location):\n fonts = initsysfonts_unix(bin_location)\n break\n if len(fonts) == 0:\n fonts = _font_finder_darwin()\n return fonts", + "docstring": "Read the fonts on MacOS, and OS X.", + "type": "function", + "file_path": "pygame\\src_py\\sysfont.py", + "ast_data": "FunctionDef name:initsysfonts_darwin arguments Assign Assign For If Call Assign Call If Compare Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_merge_delta_local_plans", + "source_code": "def _merge_delta_local_plans(cached_plans: list[SavePlan], delta_plans: list[SavePlan]) -> list[SavePlan]:\n merged_plans = 
[]\n for cached_plan, delta_plan in zip(cached_plans, delta_plans):\n if delta_plan and (not delta_plan.usable):\n merged_plans.append(cached_plan)\n else:\n merged_plans.append(delta_plan)\n return merged_plans", + "docstring": "Merge a list of delta plans into a single plan. Args: cached_plans (List[SavePlan]): A list of cached plans. delta_plans (List[SavePlan]): A list of delta plans to merge. It can contain empty plans Returns: A single merged plan. If a delta plan is not usable, use the cached plan. Otherwise, use the delta plan.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner_helpers.py", + "ast_data": "FunctionDef name:_merge_delta_local_plans arg:cached_plans arg:delta_plans arguments arg arg Assign For Call If BoolOp Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "_add_inset_axes", + "source_code": "def _add_inset_axes(parent_axes, axes_class, axes_kwargs, axes_locator):\n if axes_class is None:\n axes_class = HostAxes\n if axes_kwargs is None:\n axes_kwargs = {}\n fig = parent_axes.get_figure(root=False)\n inset_axes = axes_class(fig, parent_axes.get_position(), **{'navigate': False, **axes_kwargs, 'axes_locator': axes_locator})\n return fig.add_axes(inset_axes)", + "docstring": "Helper function to add an inset axes and disable navigation in it.", + "type": "function", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\inset_locator.py", + "ast_data": "FunctionDef name:_add_inset_axes arg:parent_axes arg:axes_class arg:axes_kwargs arg:axes_locator arguments arg arg arg arg If Compare Assign If Compare Assign Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "Instruction", + "source_code": "@dataclass_slots\n@dataclasses.dataclass\nclass Instruction:\n opcode: int\n opname: str\n arg: Optional[int]\n argval: Any\n offset: Optional[int] = None\n starts_line: Optional[int] = None\n is_jump_target: bool = False\n positions: Optional['dis.Positions'] = None\n target: Optional['Instruction'] = None\n exn_tab_entry: Optional[InstructionExnTabEntry] = None\n argrepr: Optional[str] = None\n\n def __hash__(self) -> int:\n return id(self)\n\n def __eq__(self, other) -> bool:\n return id(self) == id(other)\n\n def short_inst_repr(self) -> str:\n return f'Instruction(opname={self.opname}, offset={self.offset})'\n\n def copy_positions(self, other: 'Instruction') -> None:\n self.starts_line = other.starts_line\n self.positions = other.positions", + "docstring": "A mutable version of dis.Instruction", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", + "ast_data": "ClassDef name:Instruction FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Compare Call Call FunctionDef name:short_inst_repr arg:self arguments arg Return return:yes FunctionDef name:copy_positions arg:self arg:other arguments arg arg Assign Assign" + }, + { + "library": "tensorflow", + "name": "_TensorShapeCodec", + "source_code": "class _TensorShapeCodec:\n\n def can_encode(self, pyobj):\n return isinstance(pyobj, TensorShape)\n\n def do_encode(self, tensor_shape_value, encode_fn):\n del encode_fn\n encoded_tensor_shape = struct_pb2.StructuredValue()\n encoded_tensor_shape.tensor_shape_value.CopyFrom(tensor_shape_value.as_proto())\n return encoded_tensor_shape\n\n def can_decode(self, value):\n return value.HasField('tensor_shape_value')\n\n def do_decode(self, value, 
decode_fn):\n del decode_fn\n return TensorShape(value.tensor_shape_value)", + "docstring": "Codec for .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", + "ast_data": "ClassDef name:_TensorShapeCodec FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes Call FunctionDef name:do_encode arg:self arg:tensor_shape_value arg:encode_fn arguments arg arg arg Assign Call Call Call Return return:yes FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "get_site_packages", + "source_code": "def get_site_packages(self):\n if sys.version_info >= (3, 12):\n plat_path = Path(sysconfig.get_path('platlib'))\n elif 'deb_system' in sysconfig.get_scheme_names():\n plat_path = Path(sysconfig.get_path('platlib', 'deb_system'))\n else:\n plat_path = Path(sysconfig.get_path('platlib'))\n return self.installed / plat_path.relative_to(sys.exec_prefix)", + "docstring": "Depending on whether we have debian python or not, return dist_packages path or site_packages path.", + "type": "method", + "file_path": "scipy\\dev.py", + "ast_data": "FunctionDef name:get_site_packages arg:self arguments arg If Compare Assign Call Call If Compare Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "track_external", + "source_code": "def track_external(self, *external: Union[nn.Module, optim.Optimizer, torch.Tensor]) -> None:\n pass", + "docstring": "This is no-op for ``", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_tools\\fsdp2_mem_tracker.py", + "ast_data": "FunctionDef name:track_external arg:self arguments arg arg" + }, + { + "library": "sphinx", + "name": "emit_firstresult", + "source_code": "def emit_firstresult(self, name: str, *args: Any, allowed_exceptions: tuple[type[Exception], ...]=()) -> Any:\n for result in self.emit(name, *args, allowed_exceptions=allowed_exceptions):\n if result is not None:\n return result\n return None", + "docstring": "Emit a Sphinx event and returns first result. This returns the result of the first handler that doesn't return ``.", + "type": "method", + "file_path": "sphinx\\sphinx\\events.py", + "ast_data": "FunctionDef name:emit_firstresult arg:self arg:name arguments arg arg arg arg For Call If Compare Return return:yes Return return:no" + }, + { + "library": "pandas", + "name": "_validate_or_indexify_columns", + "source_code": "def _validate_or_indexify_columns(content: list[np.ndarray], columns: Index | None) -> Index:\n if columns is None:\n columns = default_index(len(content))\n else:\n is_mi_list = isinstance(columns, list) and all((isinstance(col, list) for col in columns))\n if not is_mi_list and len(columns) != len(content):\n raise AssertionError(f'{len(columns)} columns passed, passed data had {len(content)} columns')\n if is_mi_list:\n if len({len(col) for col in columns}) > 1:\n raise ValueError('Length of columns passed for MultiIndex columns is different')\n if columns and len(columns[0]) != len(content):\n raise ValueError(f'{len(columns[0])} columns passed, passed data had {len(content)} columns')\n return columns", + "docstring": "If columns is None, make numbers as column names; Otherwise, validate that columns have valid length. 
Parameters ---------- content : list of np.ndarrays columns : Index or None Returns ------- Index If columns is None, assign positional column index value as columns. Raises ------ 1. AssertionError when content is not composed of list of lists, and if length of columns is not equal to length of content. 2. ValueError when content is list of lists, but length of each sub-list is not equal 3. ValueError when content is list of lists, but length of sub-list is not equal to length of content", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\construction.py", + "ast_data": "FunctionDef name:_validate_or_indexify_columns arg:content arg:columns arguments arg arg If Compare Assign Call Call Assign BoolOp Call Call Call If BoolOp Compare Call Call Raise Call Call Call If If Compare Call Call Raise Call If BoolOp Compare Call Call Raise Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "path_to_str", + "source_code": "@tf_export('compat.path_to_str')\ndef path_to_str(path):\n if hasattr(path, '__fspath__'):\n path = as_str_any(path.__fspath__())\n return path", + "docstring": "Converts input which is a object to type. Converts from any python constant representation of a object to a string. If the input is not a object, simply returns the input. Args: path: An object that can be converted to path representation. Returns: A object. Usage: In case a simplified version of the path is needed from an object. Examples:", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\compat.py", + "ast_data": "FunctionDef name:path_to_str arg:path arguments arg If Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "update_hash_with_primitive_value", + "source_code": "def update_hash_with_primitive_value(hash_value, value):\n hash_const = np.uint64(11400714819323197440)\n hash_value = np.uint64(hash_value)\n value = np.uint64(value)\n hash_value = np.array([hash_value])\n value = np.array([value])\n hash_value = np.bitwise_xor(hash_value, value + hash_const + np.left_shift(hash_value, 10) + np.right_shift(hash_value, 4))[0]\n return hash_value", + "docstring": "Update the hash value using a primitive value. Args: hash_value (uint64): The current hash value. value: The primitive value to incorporate into the hash. Returns: int: The updated hash value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py", + "ast_data": "FunctionDef name:update_hash_with_primitive_value arg:hash_value arg:value arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "SoftMarginLoss", + "source_code": "class SoftMarginLoss(_Loss):\n __constants__ = ['reduction']\n\n def __init__(self, size_average=None, reduce=None, reduction: str='mean') -> None:\n super().__init__(size_average, reduce, reduction)\n\n def forward(self, input: Tensor, target: Tensor) -> Tensor:\n return F.soft_margin_loss(input, target, reduction=self.reduction)", + "docstring": "Creates a criterion that optimizes a two-class classification logistic loss between input tensor :math: and target tensor :math: (containing 1 or -1). .. math:: \\text{loss}(x, y) = \\sum_i \\frac{\\log(1 + \\exp(-y[i]*x[i]))}{\\text{x.nelement}()} Args: size_average (bool, optional): Deprecated (see :attr:). By default, the losses are averaged over each loss element in the batch. 
Note that for some losses, there are multiple elements per sample. If the field :attr: is set to `reducereductionsize_averagereducesize_averagesize_averagereducereduction(*)*(*)reduction(*)`, same shape as input.", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\loss.py", + "ast_data": "ClassDef name:SoftMarginLoss Assign FunctionDef name:__init__ arg:self arg:size_average arg:reduce arg:reduction arguments arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arg:target arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "tree_leaves", + "source_code": "def tree_leaves(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> list[Any]:\n return optree.tree_leaves(tree, is_leaf=is_leaf, none_is_leaf=True, namespace='torch')", + "docstring": "Get the leaves of a pytree. See also :func:. >>> tree = {\"b\": (2, [3, 4]), \"a\": 1, \"c\": None, \"d\": 5} >>> tree_leaves(tree) [2, 3, 4, 1, None, 5] >>> tree_leaves(1) [1] >>> tree_leaves(None) [None] Args: tree (pytree): A pytree to flatten. is_leaf (callable, optional): An extra leaf predicate function that will be called at each flattening step. The function should have a single argument with signature `True`, the whole subtree being treated as a leaf. Otherwise, the default pytree registry will be used to determine a node is a leaf or not. If the function is not specified, the default pytree registry will be used. Returns: A list of leaf values.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py", + "ast_data": "FunctionDef name:tree_leaves arg:tree arg:is_leaf arguments arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "set_reshard_after_forward", + "source_code": "def set_reshard_after_forward(self, reshard_after_forward: bool, recurse: bool=True) -> None:\n self_module = cast(nn.Module, self)\n modules = list(self_module.modules()) if recurse else [self_module]\n for module in modules:\n if isinstance(module, FSDPModule):\n state = module._get_fsdp_state()\n if (fsdp_param_group := state._fsdp_param_group):\n fsdp_param_group.post_forward_mesh_info = _get_post_forward_mesh_info(reshard_after_forward, fsdp_param_group.mesh_info)", + "docstring": "Sets if the module should reshard parameters after forward. This can be used to change the `` for training. Args: reshard_after_forward (bool): Whether to reshard parameters after forward. 
recurse (bool): Whether to set for all FSDP submodules or just the passed-in module.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py", + "ast_data": "FunctionDef name:set_reshard_after_forward arg:self arg:reshard_after_forward arg:recurse arguments arg arg arg Assign Call Assign Call Call For If Call Assign Call If Assign Call" + }, + { + "library": "cherrypy", + "name": "__repr__", + "source_code": "def __repr__(self):\n return 'httputil.Host(%r, %r, %r)' % (self.ip, self.port, self.name)", + "docstring": "Render a :class: instance representation.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", + "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_kl_beta_beta", + "source_code": "@kullback_leibler.RegisterKL(Beta, Beta)\ndef _kl_beta_beta(d1, d2, name=None):\n\n def delta(fn, is_property=True):\n fn1 = getattr(d1, fn)\n fn2 = getattr(d2, fn)\n return fn2 - fn1 if is_property else fn2() - fn1()\n with ops.name_scope(name, 'kl_beta_beta', values=[d1.concentration1, d1.concentration0, d1.total_concentration, d2.concentration1, d2.concentration0, d2.total_concentration]):\n return delta('_log_normalization', is_property=False) - math_ops.digamma(d1.concentration1) * delta('concentration1') - math_ops.digamma(d1.concentration0) * delta('concentration0') + math_ops.digamma(d1.total_concentration) * delta('total_concentration')", + "docstring": "Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta. Args: d1: instance of a Beta distribution object. d2: instance of a Beta distribution object. name: (optional) Name to use for created operations. default is \"kl_beta_beta\". Returns: Batchwise KL(d1 || d2)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\beta.py", + "ast_data": "FunctionDef name:_kl_beta_beta arg:d1 arg:d2 arg:name arguments arg arg arg FunctionDef name:delta arg:fn arg:is_property arguments arg arg Assign Call Assign Call Return return:yes Call Call With Call Return return:yes Call Call Call Call Call Call Call Call" + }, + { + "library": "django", + "name": "deactivate", + "source_code": "def deactivate():\n if hasattr(_active, 'value'):\n del _active.value", + "docstring": "Uninstall the active translation object so that further _() calls resolve to the default translation object.", + "type": "function", + "file_path": "django\\django\\utils\\translation\\trans_real.py", + "ast_data": "FunctionDef name:deactivate arguments If Call" + }, + { + "library": "django", + "name": "has_change_permission", + "source_code": "def has_change_permission(self, request, obj=None):\n opts = self.opts\n codename = get_permission_codename('change', opts)\n return request.user.has_perm('%s.%s' % (opts.app_label, codename))", + "docstring": "Return True if the given request has permission to change the given Django model instance, the default implementation doesn't examine the parameter. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to change the model instance. 
If is None, this should return True if the given request has permission to change *any* object of the given type.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:has_change_permission arg:self arg:request arg:obj arguments arg arg arg Assign Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "allow_in_graph", + "source_code": "def allow_in_graph(fn):\n if isinstance(fn, (list, tuple)):\n return [allow_in_graph(x) for x in fn]\n assert callable(fn), 'allow_in_graph expects a callable'\n if trace_rules.lookup_callable(fn) != variables.TorchInGraphFunctionVariable:\n fn_id = id(fn)\n trace_rules._disallowed_callable_ids.remove(fn_id)\n trace_rules._allowed_callable_ids.add(fn_id)\n\n def deregister():\n trace_rules._allowed_callable_ids.remove(fn_id)\n weakref.finalize(fn, deregister)\n return fn", + "docstring": "Tells the compiler frontend (Dynamo) to skip symbolic introspection of the function and instead directly write it to the graph when encountered. See :func:'s docstring for the full documentation WARNING: this API can be a footgun, please read the documentation carefully.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\decorators.py", + "ast_data": "FunctionDef name:allow_in_graph arg:fn arguments arg If Call Return return:yes Call Call If Compare Call Assign Call Call Call FunctionDef name:deregister arguments Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_codegen_partition_wrapper", + "source_code": "def _codegen_partition_wrapper(self, partition: PartitionType, signature: GraphPartitionSignature) -> None:\n from .codegen.wrapper import SubgraphPythonWrapperCodegen\n parent_wrapper_code = V.graph.wrapper_code\n graph_partition_id = next(self._graph_partition_counter)\n with V.graph.set_current_wrapper_code():\n V.graph.init_wrapper_code(is_subgraph=True, subgraph_name=f'partition_{graph_partition_id}', parent_wrapper_code=parent_wrapper_code, partition_signatures=signature)\n self._codegen(partition)\n assert isinstance(V.graph.wrapper_code, SubgraphPythonWrapperCodegen)\n signature = self.clean_removed_buffer_from_partition_signatures(signature)\n V.graph.wrapper_code.partition_signatures = signature\n V.graph.wrapper_code.write_prefix()\n partition_code, _ = V.graph.wrapper_code.generate(V.graph.is_inference)\n V.graph.wrapper_code.define_subgraph_launcher_fn(partition_code.value)\n V.graph.wrapper_code.codegen_partition_call(graph_partition_id, signature)\n V.graph.wrapper_code.allocated.update([node.get_name() for node in signature.output_nodes])", + "docstring": "Codegen a partition given its inputs/outputs", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:_codegen_partition_wrapper arg:self arg:partition arg:signature arguments arg arg arg Assign Assign Call With Call Call Call Call Assign Call Assign Call Assign Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "set_learning_phase", + "source_code": "@doc_controls.do_not_generate_docs\ndef set_learning_phase(value):\n warnings.warn('`tf.keras.backend.set_learning_phase` is deprecated and will be removed after 2020-10-11. To update it, simply pass a True/False value to the `training` argument of the `__call__` method of your layer or model.')\n deprecated_internal_set_learning_phase(value)", + "docstring": "Sets the learning phase to a fixed value. 
The backend learning phase affects any code that calls In particular, all Keras built-in layers use the learning phase as the default for the arg to . User-written layers and models can achieve the same behavior with code that looks like: Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Raises: ValueError: if is neither nor .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:set_learning_phase arg:value arguments arg Call Call" + }, + { + "library": "pytorch", + "name": "_set_dependent_itervars", + "source_code": "def _set_dependent_itervars(self, index: sympy.Expr):\n for s in index.free_symbols:\n if s in V.kernel.itervars:\n self.dependent_itervars.add(s)\n elif s.name in V.kernel.cse.varname_map:\n self.dependent_itervars.update(V.kernel.cse.varname_map[s.name].dependent_itervars)", + "docstring": "Set the relevant itervars for this variable based on the expression. This includes the itervars directly used in the as well as relevant itervars of other cse variables used in the .", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_utils.py", + "ast_data": "FunctionDef name:_set_dependent_itervars arg:self arg:index arguments arg arg For If Compare Call If Compare Call" + }, + { + "library": "matplotlib", + "name": "get_current_underline_thickness", + "source_code": "def get_current_underline_thickness(self) -> float:\n return self.fontset.get_underline_thickness(self.font, self.fontsize, self.dpi)", + "docstring": "Return the underline thickness for this state.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py", + "ast_data": "FunctionDef name:get_current_underline_thickness arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "current", + "source_code": "def current():\n if ops.executing_eagerly_outside_functions():\n d = context.context().device_name\n else:\n op = _FakeOperation()\n ops.get_default_graph()._apply_device_functions(op)\n d = op.device\n return d", + "docstring": "Return a string (not canonicalized) for the current device.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\device_util.py", + "ast_data": "FunctionDef name:current arguments If Call Assign Call Assign Call Call Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "state_dict", + "source_code": "def state_dict(self):\n state = self._convert_mask(self.state)\n return {'state': state, 'data_groups': self.data_groups, '_container': self._container.state_dict()}", + "docstring": "Returns the state of the optimizer as a :class:. It contains: * state - contains name -> mask mapping. 
* data_groups - a list containing all sparsity configuration groups with the key name specifying the name of the data * container_state_dict - the state dictionary of the internal container model used for sparsification", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\base_data_sparsifier.py", + "ast_data": "FunctionDef name:state_dict arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "TensorTypeToName", + "source_code": "def TensorTypeToName(tensor_type):\n for name, value in schema_fb.TensorType.__dict__.items():\n if value == tensor_type:\n return name\n return None", + "docstring": "Converts a numerical enum to a readable tensor type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\tools\\visualize.py", + "ast_data": "FunctionDef name:TensorTypeToName arg:tensor_type arguments arg For Call If Compare Return return:yes Return return:no" + }, + { + "library": "pandas", + "name": "flags", + "source_code": "@final\n@property\ndef flags(self) -> Flags:\n return self._flags", + "docstring": "Get the properties associated with this pandas object. The available flags are * :attr: See Also -------- Flags : Flags that apply to pandas objects. DataFrame.attrs : Global metadata applying to this dataset. Notes ----- \"Flags\" differ from \"metadata\". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2]}) >>> df.flags Flags can be get or set using `` >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Or by slicing with a key >>> df.flags[\"allows_duplicate_labels\"] False >>> df.flags[\"allows_duplicate_labels\"] = True", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:flags arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "BaseDatabaseValidation", + "source_code": "class BaseDatabaseValidation:\n\n def __init__(self, connection):\n self.connection = connection\n\n def __del__(self):\n del self.connection\n\n def check(self, **kwargs):\n return []\n\n def check_field(self, field, **kwargs):\n errors = []\n if hasattr(self, 'check_field_type') and (not getattr(field, 'remote_field', None)):\n db_supports_all_required_features = all((getattr(self.connection.features, feature, False) for feature in field.model._meta.required_db_features))\n if db_supports_all_required_features:\n field_type = field.db_type(self.connection)\n if field_type is not None:\n errors.extend(self.check_field_type(field, field_type))\n return errors", + "docstring": "Encapsulate backend-specific validation.", + "type": "class", + "file_path": "django\\django\\db\\backends\\base\\validation.py", + "ast_data": "ClassDef name:BaseDatabaseValidation FunctionDef name:__init__ arg:self arg:connection arguments arg arg Assign FunctionDef name:__del__ arg:self arguments arg FunctionDef name:check arg:self arguments arg arg Return return:no FunctionDef name:check_field arg:self arg:field arguments arg arg arg Assign If BoolOp Call Call Assign Call Call If Assign Call If Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "sync", + "source_code": "def sync(self):\n if getattr(self, '_async_checkpointer_impl', None) is not None:\n self._async_checkpointer_impl.sync()", + "docstring": "Wait for any outstanding 
save or restore operations.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:sync arg:self arguments arg If Compare Call Call" + }, + { + "library": "tensorflow", + "name": "_num_relevant", + "source_code": "def _num_relevant(labels, k):\n if k < 1:\n raise ValueError(f'Invalid k={k}')\n with ops.name_scope(None, 'num_relevant', (labels,)) as scope:\n labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)\n if isinstance(labels, sparse_tensor.SparseTensor):\n return math_ops.minimum(sets.set_size(labels), k, name=scope)\n num_labels = math_ops.reduce_sum(array_ops.where_v2(math_ops.greater_equal(labels, 0), array_ops.ones_like(labels), array_ops.zeros_like(labels)), axis=-1)\n return math_ops.minimum(num_labels, k, name=scope)", + "docstring": "Computes number of relevant values for each row in labels. For labels with shape [D1, ... DN, num_labels], this is the minimum of and . Args: labels: or with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and has shape [batch_size, num_labels]. k: Integer, k for @k metric. Returns: Integer of shape [D1, ... DN], where each value is the number of relevant values for that row. Raises: ValueError: if inputs have invalid dtypes or values.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", + "ast_data": "FunctionDef name:_num_relevant arg:labels arg:k arguments arg arg If Compare Raise Call With Call Assign Call If Call Return return:yes Call Call Assign Call Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "worker_devices", + "source_code": "@property\ndef worker_devices(self):\n raise NotImplementedError('must be implemented in descendants')", + "docstring": "Returns the tuple of all devices used to for compute replica execution.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", + "ast_data": "FunctionDef name:worker_devices arg:self arguments arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_maybe_add_default_serving_output", + "source_code": "def _maybe_add_default_serving_output(export_outputs):\n if len(export_outputs) == 1:\n (key, value), = export_outputs.items()\n if key != signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n export_outputs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = value\n if len(export_outputs) > 1:\n if signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in export_outputs:\n raise ValueError('Multiple export_outputs were provided, but none of them is specified as the default. Do this by naming one of them with signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.')\n return export_outputs", + "docstring": "Add a default serving output to the export_outputs if not present. Args: export_outputs: Describes the output signatures to be exported to and used during serving. Should be a dict. 
Returns: export_outputs dict with default serving signature added if necessary Raises: ValueError: if multiple export_outputs were provided without a default serving key.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_utils.py", + "ast_data": "FunctionDef name:_maybe_add_default_serving_output arg:export_outputs arguments arg If Compare Call Assign Call If Compare Assign If Compare Call If Compare Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "benchmark_map_and_filter_fusion", + "source_code": "def benchmark_map_and_filter_fusion(self):\n chain_lengths = [0, 1, 2, 5, 10, 20, 50]\n for chain_length in chain_lengths:\n self._benchmark_map_and_filter_fusion(chain_length=chain_length, optimize_dataset=False)\n self._benchmark_map_and_filter_fusion(chain_length=chain_length, optimize_dataset=True)", + "docstring": "Evaluates performance map of fusion.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\benchmarks\\optimize_benchmark.py", + "ast_data": "FunctionDef name:benchmark_map_and_filter_fusion arg:self arguments arg Assign For Call Call" + }, + { + "library": "pytorch", + "name": "grad", + "source_code": "@exposed_in('torch.func')\ndef grad(func: Callable, argnums: argnums_t=0, has_aux: bool=False) -> Callable:\n import torch._functorch.eager_transforms as eager_transforms\n from torch._dynamo import is_compiling\n\n def wrapper(*args, **kwargs):\n return eager_transforms.grad_impl(func, argnums, has_aux, args, kwargs)\n if not is_compiling():\n wrapper = functools.wraps(func)(wrapper)\n return wrapper", + "docstring": "``.", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\apis.py", + "ast_data": "FunctionDef name:grad arg:func arg:argnums arg:has_aux arguments arg arg arg FunctionDef name:wrapper arguments arg arg Return return:yes Call If Call Assign Call Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "_check_retval_", + "source_code": "def _check_retval_(self):\n return self.contents", + "docstring": "This method is called when this class is used as the .restype attribute for a shared-library function, to automatically wrap the pointer into an array.", + "type": "method", + "file_path": "numpy\\numpy\\ctypeslib\\_ctypeslib.py", + "ast_data": "FunctionDef name:_check_retval_ arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "ResolvedExportOptions", + "source_code": "@deprecated('torch.onnx.dynamo_export is deprecated since 2.7.0. Please use torch.onnx.export(..., dynamo=True) instead.', category=None)\nclass ResolvedExportOptions(ExportOptions):\n\n def __init__(self):\n from torch.onnx._internal.fx import dynamo_graph_extractor, onnxfunction_dispatcher\n self.dynamic_shapes: bool = True\n self.fx_tracer: dynamo_graph_extractor.DynamoExport = dynamo_graph_extractor.DynamoExport()\n self.fake_context = None\n self.onnx_registry: OnnxRegistry = OnnxRegistry()\n self.decomposition_table = decomposition_table.create_onnx_friendly_decomposition_table(self.onnx_registry)\n self.onnxfunction_dispatcher = onnxfunction_dispatcher.OnnxFunctionDispatcher(self.onnx_registry)", + "docstring": "Consolidates :class: with default values. All unspecified options from :class: are assigned a default value. 
This is an internal class and its API may be changed at any time without notice.", + "type": "class", + "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py", + "ast_data": "ClassDef name:ResolvedExportOptions FunctionDef name:__init__ arg:self arguments arg Call Assign Call Assign Call Assign Call Call" + }, + { + "library": "cherrypy", + "name": "tonative", + "source_code": "def tonative(n, encoding='ISO-8859-1'):\n if isinstance(n, bytes):\n return n.decode(encoding)\n return n", + "docstring": "Return the given string as a native string in the given encoding.", + "type": "function", + "file_path": "cherrypy\\cherrypy\\_cpcompat.py", + "ast_data": "FunctionDef name:tonative arg:n arg:encoding arguments arg arg If Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, loop_var, loop_len, pfor_ops, fallback_to_while_loop, all_indices=None, all_indices_partitioned=False, pfor_config=None, warn=False):\n assert isinstance(loop_var, tensor_lib.Tensor)\n assert loop_var.op.type == 'PlaceholderWithDefault'\n self._loop_var = loop_var\n loop_len_value = tensor_util.constant_value(loop_len)\n if loop_len_value is not None:\n loop_len = loop_len_value\n self._loop_len_vector = ops.convert_to_tensor([loop_len])\n else:\n self._loop_len_vector = array_ops.reshape(loop_len, [1])\n self._all_indices_partitioned = all_indices_partitioned\n if all_indices_partitioned:\n assert all_indices is not None\n if all_indices is None:\n self.all_indices = math_ops.range(loop_len, dtype=dtypes.int32, name='all_indices')\n else:\n self.all_indices = all_indices\n self._conversion_map = object_identity.ObjectIdentityDictionary()\n self._conversion_map[loop_var] = wrap(self.all_indices, True)\n self._pfor_ops = set(pfor_ops)\n self._pfor_op_ids = set((x._id for x in pfor_ops))\n self._fallback_to_while_loop = fallback_to_while_loop\n self._warn = warn\n self._pfor_config = pfor_config", + "docstring": "Creates an object to rewrite a parallel-for loop. Args: loop_var: Tensor output of a Placeholder operation. The value should be an int32 scalar representing the loop iteration number. loop_len: A scalar or scalar Tensor representing the number of iterations the loop is run for. pfor_ops: List of all ops inside the loop body. fallback_to_while_loop: If True, on failure to vectorize an op, a while loop is used to sequentially execute that op. all_indices: If not None, an int32 vector with size representing the iteration ids that are still active. These values should be unique and sorted. However they may not be contiguous. This is typically the case when inside a control flow construct which has partitioned the indices of the iterations that are being converted. all_indices_partitioned: If True, this object is being constructed from a control flow construct where not all the pfor iterations are guaranteed to be active. pfor_config: PForConfig object used while constructing the loop body. 
warn: Whether or not to warn on while loop conversions.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:loop_var arg:loop_len arg:pfor_ops arg:fallback_to_while_loop arg:all_indices arg:all_indices_partitioned arg:pfor_config arg:warn arguments arg arg arg arg arg arg arg arg arg Call Compare Assign Assign Call If Compare Assign Assign Call Assign Call Assign If Compare If Compare Assign Call Assign Assign Call Assign Call Assign Call Assign Call Assign Assign Assign" + }, + { + "library": "scikit-learn", + "name": "__call__", + "source_code": "def __call__(self, X, Y=None, eval_gradient=False):\n X = np.atleast_2d(X)\n if Y is None:\n dists = squareform(pdist(X, metric='euclidean'))\n arg = np.pi * dists / self.periodicity\n sin_of_arg = np.sin(arg)\n K = np.exp(-2 * (sin_of_arg / self.length_scale) ** 2)\n else:\n if eval_gradient:\n raise ValueError('Gradient can only be evaluated when Y is None.')\n dists = cdist(X, Y, metric='euclidean')\n K = np.exp(-2 * (np.sin(np.pi / self.periodicity * dists) / self.length_scale) ** 2)\n if eval_gradient:\n cos_of_arg = np.cos(arg)\n if not self.hyperparameter_length_scale.fixed:\n length_scale_gradient = 4 / self.length_scale ** 2 * sin_of_arg ** 2 * K\n length_scale_gradient = length_scale_gradient[:, :, np.newaxis]\n else:\n length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))\n if not self.hyperparameter_periodicity.fixed:\n periodicity_gradient = 4 * arg / self.length_scale ** 2 * cos_of_arg * sin_of_arg * K\n periodicity_gradient = periodicity_gradient[:, :, np.newaxis]\n else:\n periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))\n return (K, np.dstack((length_scale_gradient, periodicity_gradient)))\n else:\n return K", + "docstring": "Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : ndarray of shape (n_samples_Y, n_features), default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Only supported when Y is None. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when is True.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg Assign Call If Compare Assign Call Call Assign Assign Call Assign Call If Raise Call Assign Call Assign Call Call If Assign Call If Assign Assign Assign Call If Assign Assign Assign Call Return return:yes Call Return return:yes" + }, + { + "library": "cryptography", + "name": "public_bytes_raw", + "source_code": "@abc.abstractmethod\ndef public_bytes_raw(self) -> bytes:\n pass", + "docstring": "The raw bytes of the public key. 
Equivalent to public_bytes(Raw, Raw).", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x25519.py", + "ast_data": "FunctionDef name:public_bytes_raw arg:self arguments arg" + }, + { + "library": "pygame", + "name": "packager_imports", + "source_code": "def packager_imports():\n import atexit\n import numpy\n import OpenGL.GL\n import pygame.macosx\n import pygame.colordict", + "docstring": "some additional imports that py2app/py2exe will want to see", + "type": "function", + "file_path": "pygame\\src_py\\__init__.py", + "ast_data": "FunctionDef name:packager_imports arguments" + }, + { + "library": "pandas", + "name": "_get_data_as_items", + "source_code": "def _get_data_as_items(self) -> list[tuple[str, int]]:\n rng = self._range\n return [('start', rng.start), ('stop', rng.stop), ('step', rng.step)]", + "docstring": "return a list of tuples of start, stop, step", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\range.py", + "ast_data": "FunctionDef name:_get_data_as_items arg:self arguments arg Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y):\n check_consistent_length(X, y)\n X = validate_data(self, X, dtype=np.float64, force_writeable=True, copy=self.copy, ensure_min_samples=2)\n y = check_array(y, input_name='y', dtype=np.float64, force_writeable=True, copy=self.copy, ensure_2d=False)\n if y.ndim == 1:\n y = y.reshape(-1, 1)\n n_components = self.n_components\n rank_upper_bound = min(X.shape[0], X.shape[1], y.shape[1])\n if n_components > rank_upper_bound:\n raise ValueError(f'`n_components` upper bound is {rank_upper_bound}. Got {n_components} instead. Reduce `n_components`.')\n X, y, self._x_mean, self._y_mean, self._x_std, self._y_std = _center_scale_xy(X, y, self.scale)\n C = np.dot(X.T, y)\n U, s, Vt = svd(C, full_matrices=False)\n U = U[:, :n_components]\n Vt = Vt[:n_components]\n U, Vt = svd_flip(U, Vt)\n V = Vt.T\n self.x_weights_ = U\n self.y_weights_ = V\n self._n_features_out = self.x_weights_.shape[1]\n return self", + "docstring": "Fit model to data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training samples. y : array-like of shape (n_samples,) or (n_samples, n_targets) Targets. Returns ------- self : object Fitted estimator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cross_decomposition\\_pls.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Assign Call Assign Call If Compare Assign Call Assign Assign Call If Compare Raise Call Assign Call Assign Call Assign Call Assign Assign Assign Call Assign Assign Assign Assign Return return:yes Call" + }, + { + "library": "sphinx", + "name": "init", + "source_code": "def init(self, builder: Builder, theme: Theme | None=None, dirs: list[str] | None=None) -> None:\n msg = 'must be implemented in subclasses'\n raise NotImplementedError(msg)", + "docstring": "Called by the builder to initialize the template system. 
*builder* is the builder object; you'll probably want to look at the value of `sphinx.theming.Theme` object or None; in the latter case, *dirs* can be list of fixed directories to look for templates.", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:init arg:self arg:builder arg:theme arg:dirs arguments arg arg arg arg Assign Raise Call" + }, + { + "library": "pytorch", + "name": "_is_supported", + "source_code": "def _is_supported(self, module: nn.Module, insert: bool=False) -> bool:\n is_supported_type = any((isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED))\n future_supported_type = any((isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_FUTURE_SUPPORTED))\n supported = is_supported_type or future_supported_type\n if insert:\n return supported\n else:\n has_obs = hasattr(module, self.DEFAULT_PRE_OBSERVER_NAME) and hasattr(module, self.DEFAULT_POST_OBSERVER_NAME)\n return supported and has_obs", + "docstring": "Returns whether the given module is supported for observers Args module: The module to check and ensure is supported insert: True if this is check for observer insertion, false if for report gen Returns True if the module is supported by observer, False otherwise", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", + "ast_data": "FunctionDef name:_is_supported arg:self arg:module arg:insert arguments arg arg arg Assign Call Call Assign Call Call Assign BoolOp If Return return:yes Assign BoolOp Call Call Return return:yes BoolOp" + }, + { + "library": "tensorflow", + "name": "set_union", + "source_code": "@tf_export('sets.union', v1=['sets.union', 'sets.set_union'])\n@dispatch.add_dispatch_support\ndef set_union(a, b, validate_indices=True):\n a, b, _ = _convert_to_tensors_or_sparse_tensors(a, b)\n return _set_operation(a, b, 'union', validate_indices)", + "docstring": "Compute set union of elements in last dimension of and . All but the last dimension of and must match. Example: Args: a: or of the same type as . If sparse, indices must be sorted in row-major order. b: or of the same type as . If sparse, indices must be sorted in row-major order. validate_indices: Whether to validate the order and range of sparse indices in and . Returns: A whose shape is the same rank as and , and all but the last dimension the same. 
Elements along the last dimension contain the unions.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\sets_impl.py", + "ast_data": "FunctionDef name:set_union arg:a arg:b arg:validate_indices arguments arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "DtypeView", + "source_code": "@ir_dataclass\nclass DtypeView(BaseView):\n target_dtype: torch.dtype\n\n @classmethod\n def create(cls, x, new_dtype):\n if is_storage_and_layout(x):\n storage, old_layout = as_storage_and_layout(x)\n new_layout = FixedLayout(old_layout.device, new_dtype, old_layout.size, old_layout.stride, old_layout.offset)\n return ReinterpretView(data=storage, layout=new_layout)\n return DtypeView(data=x, target_dtype=new_dtype)\n\n def __str__(self) -> str:\n return self.str_helper([self.data, self.target_dtype])\n __repr__ = __str__\n\n @property\n def dtype(self):\n return self.target_dtype\n\n def get_size(self) -> Sequence[Expr]:\n return self.data.get_size()\n\n def make_loader(self) -> Callable[[Sequence[Expr]], OpsValue]:\n inner = self.data.make_loader()\n\n def loader(idx):\n return ops.to_dtype_bitcast(inner(idx), self.target_dtype, self.data.dtype)\n return loader", + "docstring": "Pretend our storage has a different type", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "ClassDef name:DtypeView FunctionDef name:create arg:cls arg:x arg:new_dtype arguments arg arg arg If Call Assign Call Assign Call Return return:yes Call Return return:yes Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Assign FunctionDef name:dtype arg:self arguments arg Return return:yes FunctionDef name:get_size arg:self arguments arg Return return:yes Call FunctionDef name:make_loader arg:self arguments arg Assign Call FunctionDef name:loader arg:idx arguments arg Return return:yes Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "map", + "source_code": "@staticmethod\n@abc.abstractmethod\ndef map(data: Series | DataFrame | np.ndarray, func: AggFuncType, args: tuple, kwargs: dict[str, Any], decorator: Callable | None, skip_na: bool):\n pass", + "docstring": "Executor method to run functions elementwise. In general, pandas uses ``.", + "type": "method", + "file_path": "pandas\\pandas\\core\\apply.py", + "ast_data": "FunctionDef name:map arg:data arg:func arg:args arg:kwargs arg:decorator arg:skip_na arguments arg arg arg arg arg arg" + }, + { + "library": "tensorflow", + "name": "Reduction", + "source_code": "@tf_export(v1=['losses.Reduction'])\nclass Reduction:\n NONE = 'none'\n SUM = 'weighted_sum'\n SUM_OVER_BATCH_SIZE = 'weighted_sum_over_batch_size'\n MEAN = 'weighted_mean'\n SUM_BY_NONZERO_WEIGHTS = 'weighted_sum_by_nonzero_weights'\n SUM_OVER_NONZERO_WEIGHTS = SUM_BY_NONZERO_WEIGHTS\n\n @classmethod\n def all(cls):\n return (cls.NONE, cls.SUM, cls.MEAN, cls.SUM_OVER_BATCH_SIZE, cls.SUM_OVER_NONZERO_WEIGHTS, cls.SUM_BY_NONZERO_WEIGHTS)\n\n @classmethod\n def validate(cls, key):\n if key not in cls.all():\n raise ValueError(f'Invalid Reduction Key {key}. Key should be one of {cls.all()}.')", + "docstring": "Types of loss reduction. Contains the following values: * : Un-reduced weighted losses with the same shape as input. * : Scalar sum of weighted losses. * : Scalar divided by sum of weights. DEPRECATED. * : Scalar divided by number of elements in losses. * : Scalar divided by number of non-zero weights. DEPRECATED. * : Same as . 
DEPRECATED.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\losses_impl.py", + "ast_data": "ClassDef name:Reduction Assign Assign Assign Assign Assign Assign FunctionDef name:all arg:cls arguments arg Return return:yes FunctionDef name:validate arg:cls arg:key arguments arg arg If Compare Call Raise Call Call Call" + }, + { + "library": "tensorflow", + "name": "_partition_outer_dimension", + "source_code": "def _partition_outer_dimension(value, row_partition):\n is_ragged = row_partition.uniform_row_length() is None\n if isinstance(value, tensor.Tensor) and (not is_ragged):\n new_shape = array_ops.concat([[row_partition.nrows(), row_partition.uniform_row_length()], array_ops.shape(value, out_type=row_partition.dtype)[1:]], axis=0)\n return array_ops.reshape(value, new_shape)\n elif isinstance(value, (tensor.Tensor, ragged_tensor.RaggedTensor)):\n return ragged_tensor.RaggedTensor._from_row_partition(value, row_partition)\n else:\n assert isinstance(value, StructuredTensor)\n nrows = row_partition.static_nrows\n ncols = row_partition.static_uniform_row_length\n shape = tensor_shape.TensorShape([nrows, ncols]).concatenate(value.shape[1:])\n fields = dict(((k, _partition_outer_dimension(v, row_partition)) for k, v in value._fields.items()))\n return StructuredTensor._old_init(fields, shape, row_partition.nrows(), (row_partition,) + value.row_partitions)", + "docstring": "Partitions the outer dimension of using . Examples: >>> partition = RowPartition.from_row_lengths([2, 0, 1]) >>> _partition_outer_dimension(tf.constant([1, 2, 3]), partition) >>> struct_value = tf.experimental.StructuredTensor.from_pyval( ... [{'x': 1}, {'x': 2}, {'x': 3}]) >>> _partition_outer_dimension(struct_value, partition) }, shape=(3, None))> Args: value: Tensor, RaggedTensor, or StructuredTensor row_partition: RowPartition Returns: A value with the same type as , where .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py", + "ast_data": "FunctionDef name:_partition_outer_dimension arg:value arg:row_partition arguments arg arg Assign Compare Call If BoolOp Call Assign Call Call Call Call Return return:yes Call If Call Return return:yes Call Call Assign Assign Assign Call Call Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "sphinx", + "name": "create_template_bridge", + "source_code": "def create_template_bridge(self) -> None:\n if self.config.template_bridge:\n template_bridge_cls = import_object(self.config.template_bridge, source='template_bridge setting')\n self.templates = template_bridge_cls()\n else:\n from sphinx.jinja2glue import BuiltinTemplateLoader\n self.templates = BuiltinTemplateLoader()", + "docstring": "Return the template bridge configured.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\__init__.py", + "ast_data": "FunctionDef name:create_template_bridge arg:self arguments arg If Assign Call Assign Call Assign Call" + }, + { + "library": "cryptography", + "name": "key_size", + "source_code": "@property\n@abc.abstractmethod\ndef key_size(self) -> int:\n pass", + "docstring": "Bit size of a secret scalar for the curve.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py", + "ast_data": "FunctionDef name:key_size arg:self arguments arg" + }, + { + "library": "pandas", + "name": "convert", + "source_code": "@final\ndef convert(self) -> list[Block]:\n if not self.is_object:\n return [self.copy(deep=False)]\n if 
self.ndim != 1 and self.shape[0] != 1:\n blocks = self.split_and_operate(Block.convert)\n if all((blk.dtype.kind == 'O' for blk in blocks)):\n return [self.copy(deep=False)]\n return blocks\n values = self.values\n if values.ndim == 2:\n values = values[0]\n res_values = lib.maybe_convert_objects(values, convert_non_numeric=True)\n refs = None\n if res_values is values or (isinstance(res_values, NumpyExtensionArray) and res_values._ndarray is values):\n refs = self.refs\n res_values = ensure_block_shape(res_values, self.ndim)\n res_values = maybe_coerce_values(res_values)\n return [self.make_block(res_values, refs=refs)]", + "docstring": "Attempt to coerce any object types to better types. Return a copy of the block (if copy = True).", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "FunctionDef name:convert arg:self arguments arg If Return return:yes Call If BoolOp Compare Compare Assign Call If Call Compare Return return:yes Call Return return:yes Assign If Compare Assign Assign Call Assign If BoolOp Compare BoolOp Call Compare Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_singleton_broadcaster", + "source_code": "@classmethod\ndef get_singleton_broadcaster(cls, target_size):\n return _LayerBroadcaster.from_gather_index(array_ops.zeros(target_size, dtype=target_size.dtype))", + "docstring": "Broadcast from 1 element to target_size elements.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", + "ast_data": "FunctionDef name:get_singleton_broadcaster arg:cls arg:target_size arguments arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_check_partial_fit_first_call", + "source_code": "def _check_partial_fit_first_call(clf, classes=None):\n if getattr(clf, 'classes_', None) is None and classes is None:\n raise ValueError('classes must be passed on the first call to partial_fit.')\n elif classes is not None:\n if getattr(clf, 'classes_', None) is not None:\n if not np.array_equal(clf.classes_, unique_labels(classes)):\n raise ValueError('`classes=%r` is not the same as on last call to partial_fit, was: %r' % (classes, clf.classes_))\n else:\n clf.classes_ = unique_labels(classes)\n return True\n return False", + "docstring": "Private helper function for factorizing common classes param logic. Estimators that implement the ``.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\multiclass.py", + "ast_data": "FunctionDef name:_check_partial_fit_first_call arg:clf arg:classes arguments arg arg If BoolOp Compare Call Compare Raise Call If Compare If Compare Call If Call Call Raise Call Assign Call Return return:yes Return return:yes" + }, + { + "library": "pytorch", + "name": "_move_placeholder_to_front", + "source_code": "def _move_placeholder_to_front(graph_module: torch.fx.GraphModule) -> None:\n graph = graph_module.graph\n placeholders = []\n first_not_placeholder = None\n for node in graph.nodes:\n if node.op == 'placeholder':\n placeholders.append(node)\n if first_not_placeholder is None and node.op != 'placeholder':\n first_not_placeholder = node\n if first_not_placeholder is None:\n return\n for placeholder in placeholders:\n first_not_placeholder.prepend(placeholder)", + "docstring": "In torch.fx.Graph, placeholder is a special assignment node. 
If it's not executed in the beginning, it could overwrite values computed by upstream nodes.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py", + "ast_data": "FunctionDef name:_move_placeholder_to_front arg:graph_module arguments arg Assign Assign Assign For If Compare Call If BoolOp Compare Compare Assign If Compare Return return:no For Call" + }, + { + "library": "tensorflow", + "name": "assert_existing_objects_matched", + "source_code": "def assert_existing_objects_matched(self):\n for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):\n trackable = self._checkpoint.object_by_proto_id.get(node_id, None)\n if trackable is not None and trackable._update_uid < self._checkpoint.restore_uid:\n raise AssertionError(f'Object {node} not assigned a value from checkpoint.')\n for trackable_object in util.list_objects(self._object_graph_view, self._options.experimental_skip_slot_variables):\n if isinstance(trackable_object, data_structures.TrackableDataStructure) and (not trackable_object._trackable_children(save_type=base.SaveType.CHECKPOINT)):\n continue\n self._checkpoint.all_python_objects.add(trackable_object)\n unused_python_objects = object_identity.ObjectIdentitySet(_objects_with_attributes(self._checkpoint.all_python_objects)) - object_identity.ObjectIdentitySet(self._checkpoint.object_by_proto_id.values())\n if unused_python_objects:\n num_unused_python_objects = len(list(unused_python_objects))\n num_variables_to_show = min(10, num_unused_python_objects)\n raise AssertionError(f'Found {num_unused_python_objects} Python objects that were not bound to checkpointed values, likely due to changes in the Python program. Showing {num_variables_to_show} of {num_unused_python_objects} unmatched objects: {list(unused_python_objects)[:num_variables_to_show]}')\n return self", + "docstring": "Asserts that trackable Python objects have been matched. Note that this is a weaker assertion than . It will only fail for existing Python objects which are (transitive) dependencies of the root object and which do not have an entry in the checkpoint. It will not fail, for example, if a object has not yet been built and so has not created any objects. Returns: for chaining. 
Raises: AssertionError: If a Python object exists in the transitive dependencies of the root object but does not have a value in the checkpoint.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", + "ast_data": "FunctionDef name:assert_existing_objects_matched arg:self arguments arg For Call Assign Call If BoolOp Compare Compare Raise Call For Call If BoolOp Call Call Call Assign Call Call Call Call If Assign Call Call Assign Call Raise Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "to_ast", + "source_code": "def to_ast(self):\n if self == STANDARD_OPTIONS:\n return parser.parse_expression('ag__.STD')\n template = '\\n ag__.ConversionOptions(\\n recursive=recursive_val,\\n user_requested=user_requested_val,\\n optional_features=optional_features_val,\\n internal_convert_user_code=internal_convert_user_code_val)\\n '\n\n def list_of_features(values):\n return parser.parse_expression('({})'.format(', '.join(('ag__.{}'.format(str(v)) for v in values))))\n expr_ast = templates.replace(template, recursive_val=parser.parse_expression(str(self.recursive)), user_requested_val=parser.parse_expression(str(self.user_requested)), internal_convert_user_code_val=parser.parse_expression(str(self.internal_convert_user_code)), optional_features_val=list_of_features(self.optional_features))\n return expr_ast[0].value", + "docstring": "Returns a representation of this object as an AST node. The AST node encodes a constructor that would create an object with the same contents. Returns: ast.Node", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\converter.py", + "ast_data": "FunctionDef name:to_ast arg:self arguments arg If Compare Return return:yes Call Assign FunctionDef name:list_of_features arg:values arguments arg Return return:yes Call Call Call Call Call Assign Call Call Call Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_ExecutionSignature", + "source_code": "class _ExecutionSignature(collections.namedtuple('_ExecutionSignature', ('op', 'handle', 'resources', 'exclusive_resource_access'))):\n pass", + "docstring": "A class storing an op and associated attrs.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\critical_section_ops.py", + "ast_data": "ClassDef name:_ExecutionSignature Call" + }, + { + "library": "pytorch", + "name": "get_optimizer_state_dict", + "source_code": "def get_optimizer_state_dict(model: nn.Module, optimizers: Union[torch.optim.Optimizer, Iterable[torch.optim.Optimizer]], *, submodules: Optional[set[nn.Module]]=None, options: Optional[StateDictOptions]=None) -> OptimizerStateType:\n with _gc_context():\n optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers)\n info = _verify_options(model, optimizers, optim_only=True, submodules=submodules, options=options)\n optim_state_dict = _get_optim_state_dict(model, optimizers, info)\n _verify_state_dict({}, optim_state_dict, info)\n return optim_state_dict", + "docstring": "Return the combined state_dict for optimizers. See `StateDictOptions`. 
:rtype: OptimizerStateType", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict.py", + "ast_data": "FunctionDef name:get_optimizer_state_dict arg:model arg:optimizers arguments arg arg arg arg With Call Assign Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "DQuantType", + "source_code": "class DQuantType(Enum):\n FP16 = ('fp16',)\n BFP16 = 'bfp16'\n\n def __str__(self) -> str:\n return self.value", + "docstring": "Different quantization methods for auto_quantize API are identified here. auto_quantize API currently supports fp16 and bfp16 methods.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\algorithms\\_quantization\\quantization.py", + "ast_data": "ClassDef name:DQuantType Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "@_docstring.interpd\ndef __init__(self, xy, numVertices, *, radius=5, orientation=0, **kwargs):\n self.xy = xy\n self.numvertices = numVertices\n self.orientation = orientation\n self.radius = radius\n self._path = Path.unit_regular_polygon(numVertices)\n self._patch_transform = transforms.Affine2D()\n super().__init__(**kwargs)", + "docstring": "Parameters ---------- xy : (float, float) The center position. numVertices : int The number of vertices. radius : float The distance from the center to each of the vertices. orientation : float The polygon rotation angle (in radians). **kwargs properties: %(Patch:kwdoc)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:xy arg:numVertices arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign Call Assign Call Call Call" + }, + { + "library": "numpy", + "name": "__init__", + "source_code": "def __init__(self, dbfunc, domain, fillx=0, filly=0):\n super().__init__(dbfunc)\n self.domain = domain\n self.fillx = fillx\n self.filly = filly\n ufunc_domain[dbfunc] = domain\n ufunc_fills[dbfunc] = (fillx, filly)", + "docstring": "abfunc(fillx, filly) must be defined. abfunc(x, filly) = x for all x to enable reduce.", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:dbfunc arg:domain arg:fillx arg:filly arguments arg arg arg arg arg Call Call Assign Assign Assign Assign Assign" + }, + { + "library": "scikit-learn", + "name": "predict_log_proba", + "source_code": "def predict_log_proba(self, X):\n return super().predict_log_proba(X)", + "docstring": "Return log of posterior probabilities of classification. Parameters ---------- X : array-like of shape (n_samples, n_features) Array of samples/test vectors. Returns ------- C : ndarray of shape (n_samples, n_classes) Posterior log-probabilities of classification per class.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py", + "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_decision_function", + "source_code": "def _decision_function(self, X):\n check_is_fitted(self)\n if sparse.issparse(X):\n return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_\n else:\n return super()._decision_function(X)", + "docstring": "Decision function of the linear model. 
Parameters ---------- X : numpy array or scipy.sparse matrix of shape (n_samples, n_features) Returns ------- T : ndarray of shape (n_samples,) The predicted decision function.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_coordinate_descent.py", + "ast_data": "FunctionDef name:_decision_function arg:self arg:X arguments arg arg Call If Call Return return:yes Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "Permission", + "source_code": "class Permission(models.Model):\n name = models.CharField(_('name'), max_length=255)\n content_type = models.ForeignKey(ContentType, models.CASCADE, verbose_name=_('content type'))\n codename = models.CharField(_('codename'), max_length=100)\n objects = PermissionManager()\n\n class Meta:\n verbose_name = _('permission')\n verbose_name_plural = _('permissions')\n unique_together = [['content_type', 'codename']]\n ordering = ['content_type__app_label', 'content_type__model', 'codename']\n\n def __str__(self):\n return '%s | %s' % (self.content_type, self.name)\n\n def natural_key(self):\n return (self.codename, *self.content_type.natural_key())\n natural_key.dependencies = ['contenttypes.contenttype']", + "docstring": "The permissions system provides a way to assign permissions to specific users and groups of users. The permission system is used by the Django admin site, but may also be useful in your own code. The Django admin site uses permissions as follows: - The \"add\" permission limits the user's ability to view the \"add\" form and add an object. - The \"change\" permission limits a user's ability to view the change list, view the \"change\" form and change an object. - The \"delete\" permission limits the ability to delete an object. - The \"view\" permission limits the ability to view an object. Permissions are set globally per type of object, not per specific object instance. 
It is possible to say \"Mary may change news stories,\" but it's not currently possible to say \"Mary may change news stories, but only the ones she created herself\" or \"Mary may only change news stories that have a certain status or publication date.\" The permissions listed above are automatically created for each model.", + "type": "class", + "file_path": "django\\django\\contrib\\auth\\models.py", + "ast_data": "ClassDef name:Permission Assign Call Call Assign Call Call Assign Call Call Assign Call ClassDef name:Meta Assign Call Assign Call Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:natural_key arg:self arguments arg Return return:yes Call Assign" + }, + { + "library": "django", + "name": "InMemoryFileNode", + "source_code": "class InMemoryFileNode(ContentFile, TimingMixin):\n\n def __init__(self, content='', name=None):\n super().__init__(content, name)\n self._content_type = type(content)\n self._initialize_times()\n\n def open(self, mode):\n self._convert_stream_content(mode)\n self._update_accessed_time()\n return super().open(mode)\n\n def write(self, data):\n super().write(data)\n self._update_modified_time()\n\n def _initialize_stream(self):\n self.file = io.BytesIO() if self._content_type == bytes else io.StringIO()\n\n def _convert_stream_content(self, mode):\n new_content_type = bytes if 'b' in mode else str\n if self._content_type == new_content_type:\n return\n content = self.file.getvalue()\n content = content.encode() if isinstance(content, str) else content.decode()\n self._content_type = new_content_type\n self._initialize_stream()\n self.file.write(content)", + "docstring": "Helper class representing an in-memory file node. Handle unicode/bytes conversion during I/O operations and record creation, modification, and access times.", + "type": "class", + "file_path": "django\\django\\core\\files\\storage\\memory.py", + "ast_data": "ClassDef name:InMemoryFileNode FunctionDef name:__init__ arg:self arg:content arg:name arguments arg arg arg Call Call Assign Call Call FunctionDef name:open arg:self arg:mode arguments arg arg Call Call Return return:yes Call Call FunctionDef name:write arg:self arg:data arguments arg arg Call Call Call FunctionDef name:_initialize_stream arg:self arguments arg Assign Compare Call Call FunctionDef name:_convert_stream_content arg:self arg:mode arguments arg arg Assign Compare If Compare Return return:no Assign Call Assign Call Call Call Assign Call Call" + }, + { + "library": "matplotlib", + "name": "draw_all", + "source_code": "@classmethod\ndef draw_all(cls, force=False):\n for manager in cls.get_all_fig_managers():\n if force or manager.canvas.figure.stale:\n manager.canvas.draw_idle()", + "docstring": "Redraw all stale managed figures, or, if *force* is True, all managed figures.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_pylab_helpers.py", + "ast_data": "FunctionDef name:draw_all arg:cls arg:force arguments arg arg For Call If BoolOp Call" + }, + { + "library": "pandas", + "name": "_transform_doc", + "source_code": "def _transform_doc(self) -> bytes:\n from lxml.etree import XSLT, XMLParser, fromstring, parse\n style_doc = self.stylesheet\n assert style_doc is not None\n handle_data = get_data_from_filepath(filepath_or_buffer=style_doc, encoding=self.encoding, compression=self.compression, storage_options=self.storage_options)\n with handle_data as xml_data:\n curr_parser = XMLParser(encoding=self.encoding)\n if isinstance(xml_data, io.StringIO):\n xsl_doc = 
fromstring(xml_data.getvalue().encode(self.encoding), parser=curr_parser)\n else:\n xsl_doc = parse(xml_data, parser=curr_parser)\n transformer = XSLT(xsl_doc)\n new_doc = transformer(self.root)\n return bytes(new_doc)", + "docstring": "Parse stylesheet from file or buffer and run it. This method will parse stylesheet object into tree for parsing conditionally by its specific object type, then transforms original tree with XSLT script.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\xml.py", + "ast_data": "FunctionDef name:_transform_doc arg:self arguments arg Assign Compare Assign Call With Assign Call If Call Assign Call Call Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef __init__(self, concentration, validate_args=False, allow_nan_stats=True, name='Dirichlet'):\n parameters = dict(locals())\n with ops.name_scope(name, values=[concentration]) as name:\n self._concentration = self._maybe_assert_valid_concentration(ops.convert_to_tensor(concentration, name='concentration'), validate_args)\n self._total_concentration = math_ops.reduce_sum(self._concentration, -1)\n super(Dirichlet, self).__init__(dtype=self._concentration.dtype, validate_args=validate_args, allow_nan_stats=allow_nan_stats, reparameterization_type=distribution.FULLY_REPARAMETERIZED, parameters=parameters, graph_parents=[self._concentration, self._total_concentration], name=name)", + "docstring": "Initialize a batch of Dirichlet distributions. Args: concentration: Positive floating-point indicating mean number of class occurrences; aka \"alpha\". Implies , and , , i.e., if then and . validate_args: Python , default . When distribution parameters are checked for validity despite possibly degrading runtime performance. When invalid inputs may silently render incorrect outputs. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:concentration arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg Assign Call Call With Call Assign Call Call Assign Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_composite_tensor_prefer_static_fields", + "source_code": "@property\ndef _composite_tensor_prefer_static_fields(self):\n return ()", + "docstring": "A tuple of names referring to parameters that may be treated statically. This is a subset of , and contains the names of of -like args to the s constructor that may be stored as static values, if they are statically known. 
These are typically shapes or axis values.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:_composite_tensor_prefer_static_fields arg:self arguments arg Return return:no" + }, + { + "library": "tensorflow", + "name": "get_sparse_tensors", + "source_code": "def get_sparse_tensors(self, transformation_cache, state_manager):\n input_tensor = transformation_cache.get(self, state_manager)\n return self._get_sparse_tensors_for_input_tensor(input_tensor)", + "docstring": "Converts dense inputs to SparseTensor so downstream code can use it.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:get_sparse_tensors arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_convert_mask", + "source_code": "def _convert_mask(self, states_dict, sparse_coo=True):\n states = copy.deepcopy(states_dict)\n for state in states.values():\n if state['mask'] is not None:\n if isinstance(state['mask'], list):\n for idx in range(len(state['mask'])):\n if sparse_coo:\n state['mask'][idx] = state['mask'][idx].to_sparse_coo()\n else:\n state['mask'][idx] = state['mask'][idx].to_dense()\n elif sparse_coo:\n state['mask'] = state['mask'].to_sparse_coo()\n else:\n state['mask'] = state['mask'].to_dense()\n return states", + "docstring": "Converts the mask to sparse coo or dense depending on the argument. If , then the mask is stored as sparse coo else dense tensor", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py", + "ast_data": "FunctionDef name:_convert_mask arg:self arg:states_dict arg:sparse_coo arguments arg arg arg Assign Call For Call If Compare If Call For Call Call If Assign Call Assign Call If Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_verify_inefficient_unroll", + "source_code": "def _verify_inefficient_unroll(self):\n assert self.ops_before_iteration is not None\n ops_after_iteration = self._get_ops()\n new_ops = tuple((op for op in ops_after_iteration if op not in self.ops_before_iteration))\n if len(new_ops) < INEFFICIENT_UNROLL_MIN_OPS:\n return False\n ag_logging.warning('Large unrolled loop detected. Did you mean to use a TF loop? 
The following ops were created after iteration %s: %s\\nSee https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/common_errors.md#warning-large-unrolled-loop-detected\\nLocation:\\n%s', self.iterations, new_ops, '\\n'.join(traceback.format_stack()))\n return True", + "docstring": "Checks for possibly-inefficient creation of ops in a Python loop.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py", + "ast_data": "FunctionDef name:_verify_inefficient_unroll arg:self arguments arg Compare Assign Call Assign Call Compare If Compare Call Return return:yes Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, path_to_so_file: str, alloc_fn_name: str, free_fn_name: str):\n allocator = ctypes.CDLL(path_to_so_file)\n alloc_fn = ctypes.cast(getattr(allocator, alloc_fn_name), ctypes.c_void_p).value\n free_fn = ctypes.cast(getattr(allocator, free_fn_name), ctypes.c_void_p).value\n assert alloc_fn is not None\n assert free_fn is not None\n self._allocator = torch._C._cuda_customAllocator(alloc_fn, free_fn)", + "docstring": "Memory allocators are compiled in .so files and loaded dynamically using ctypes. To change the active allocator use the :func: function. Args: path_to_so_file(str): Path in the filesystem to the file containing the allocator functions alloc_fn_name(str): Name of the function to perform the memory allocation in the so file. The signature must be: void* alloc_fn_name(ssize_t size, int device, cudaStream_t stream); free_fn_name(str): Name of the function to perform the memory release in the so file. The signature must be: void free_fn_name(void* ptr, size_t size, cudaStream_t stream); .. warning:: This is currently supported only in unix OSs .. note:: See :ref: for details on creating and using a custom allocator", + "type": "method", + "file_path": "pytorch\\torch\\cuda\\memory.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:path_to_so_file arg:alloc_fn_name arg:free_fn_name arguments arg arg arg arg Assign Call Assign Call Call Assign Call Call Compare Compare Assign Call" + }, + { + "library": "matplotlib", + "name": "_get_autoscale_on", + "source_code": "def _get_autoscale_on(self):\n return self._autoscale_on", + "docstring": "Return whether this Axis is autoscaled.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:_get_autoscale_on arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "_C_contiguous_copy", + "source_code": "def _C_contiguous_copy(A):\n A = np.asarray(A)\n if A.flags.c_contiguous:\n A = A.copy()\n else:\n A = np.ascontiguousarray(A)\n return A", + "docstring": "Same as np.ascontiguousarray, but ensure a copy", + "type": "function", + "file_path": "scipy\\scipy\\linalg\\interpolative.py", + "ast_data": "FunctionDef name:_C_contiguous_copy arg:A arguments arg Assign Call If Assign Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, images: Tensor) -> Tensor:\n feats = self.backbone(images)\n return feats", + "docstring": "Extract features from the input images. Args: images: input images tensor of shape :math:. 
Returns: Dict[str, Tensor]: a dictionary containing the features.", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\model.py", + "ast_data": "FunctionDef name:forward arg:self arg:images arguments arg arg Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_build_shuffle_hybrid", + "source_code": "def _build_shuffle_hybrid(input_tensors, gather_devices, red_op, upper_level_f):\n input_tensors, shape = _flatten_tensors(input_tensors)\n devices = [t.device for t in input_tensors]\n per_worker_devices, per_worker_values = _split_by_task(devices, input_tensors)\n num_workers = len(per_worker_devices)\n up_values = []\n if len(gather_devices) != num_workers:\n raise ValueError('For shuffle hybrid, gather_devices must contain one device per worker. ')\n for w in range(0, num_workers):\n reduced_shards = _build_shuffle_gather(per_worker_values[w], [gather_devices[w]], red_op)\n up_values.append(reduced_shards[0])\n level_2_output = upper_level_f(up_values)\n output_tensors = []\n for w in range(0, num_workers):\n output_tensors += _build_shuffle_scatter([level_2_output[w]], per_worker_devices[w])\n if len(shape) != 1:\n output_tensors = _reshape_tensors(output_tensors, shape)\n return output_tensors", + "docstring": "Construct a subgraph for Shuffle hybrid all-reduce. Args: input_tensors: list of of same-shape and type values to be reduced. gather_devices: list of device names on which to host gather shards. red_op: binary elementwise reduction operator. upper_level_f: function for reducing one value per worker, across workers. Returns: list of of reduced values. Raises: ValueError: inputs not well-formed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", + "ast_data": "FunctionDef name:_build_shuffle_hybrid arg:input_tensors arg:gather_devices arg:red_op arg:upper_level_f arguments arg arg arg arg Assign Call Assign Assign Call Assign Call Assign If Compare Call Raise Call For Call Assign Call Call Assign Call Assign For Call Call If Compare Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_compute_dtype", + "source_code": "@property\ndef _compute_dtype(self):\n return self._dtype_policy.compute_dtype", + "docstring": "The layer's compute dtype. Unless mixed-precision is used, this is the same as . If self._autocast is True, layer's will cast floating-point inputs to this. Returns: The layer's compute dtype.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", + "ast_data": "FunctionDef name:_compute_dtype arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, patches, *, match_original=False, **kwargs):\n if match_original:\n\n def determine_facecolor(patch):\n if patch.get_fill():\n return patch.get_facecolor()\n return [0, 0, 0, 0]\n kwargs['facecolors'] = [determine_facecolor(p) for p in patches]\n kwargs['edgecolors'] = [p.get_edgecolor() for p in patches]\n kwargs['linewidths'] = [p.get_linewidth() for p in patches]\n kwargs['linestyles'] = [p.get_linestyle() for p in patches]\n kwargs['antialiaseds'] = [p.get_antialiased() for p in patches]\n super().__init__(**kwargs)\n self.set_paths(patches)", + "docstring": "Parameters ---------- patches : list of A sequence of Patch objects. This list may include a heterogeneous assortment of different patch types. 
match_original : bool, default: False If True, use the colors and linewidths of the original patches. If False, new colors may be assigned by providing the standard collection arguments, facecolor, edgecolor, linewidths, norm or cmap. **kwargs All other parameters are forwarded to . If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds* are None, they default to their patch setting, in sequence form. Notes ----- The use of functionality is optional. If the matrix `~.ScalarMappable.set_array`), at draw time a call to scalar mappable will be made to set the face colors.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:patches arguments arg arg arg arg If FunctionDef name:determine_facecolor arg:patch arguments arg If Call Return return:yes Call Return return:yes Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call" + }, + { + "library": "pytorch", + "name": "reorder_pre_hook_nodes_to_mimic_eager", + "source_code": "def reorder_pre_hook_nodes_to_mimic_eager(self):\n pre_hooks = []\n for node in self.fx_tracer.graph.find_nodes(op='call_function', target=call_hook):\n if node.kwargs.get('hook_type', None) != 'pre_hook':\n continue\n pre_hooks.append(node)\n for node in reversed(pre_hooks):\n hook_getitem_node = node.args[0]\n users = list(node.users.keys())\n if len(users) == 0:\n continue\n assert all((user.op == 'call_function' and user.target == operator.getitem for user in users))\n registered_node = next(iter(users[0].users.keys()))\n if registered_node is not node.next:\n registered_node.prepend(hook_getitem_node)\n registered_node.prepend(node)\n for getitem in users:\n registered_node.prepend(getitem)", + "docstring": "Usage of AOTAutograd causes all the pre_hook nodes to get pushed to the end of the graph. This differs from eager mode, which schedules them right before their registered node execution. This pass attempts to reorder the graph to mimic eager behavior.", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\compiled_autograd.py", + "ast_data": "FunctionDef name:reorder_pre_hook_nodes_to_mimic_eager arg:self arguments arg Assign For Call If Compare Call Call For Call Assign Assign Call Call If Compare Call Call BoolOp Compare Compare Assign Call Call Call If Compare Call Call For Call" + }, + { + "library": "pytorch", + "name": "scalar", + "source_code": "def scalar(name, tensor, collections=None, new_style=False, double_precision=False):\n tensor = make_np(tensor).squeeze()\n assert tensor.ndim == 0, f'Tensor should contain one element (0 dimensions). Was given size: {tensor.size} and {tensor.ndim} dimensions.'\n scalar = float(tensor)\n if new_style:\n tensor_proto = TensorProto(float_val=[scalar], dtype='DT_FLOAT')\n if double_precision:\n tensor_proto = TensorProto(double_val=[scalar], dtype='DT_DOUBLE')\n plugin_data = SummaryMetadata.PluginData(plugin_name='scalars')\n smd = SummaryMetadata(plugin_data=plugin_data)\n return Summary(value=[Summary.Value(tag=name, tensor=tensor_proto, metadata=smd)])\n else:\n return Summary(value=[Summary.Value(tag=name, simple_value=scalar)])", + "docstring": "Output a protocol buffer containing a single scalar value. The generated Summary has a Tensor.proto containing the input Tensor. Args: name: A name for the generated node. Will also serve as the series name in TensorBoard. tensor: A real numeric Tensor containing a single value. collections: Optional list of graph collections keys. 
The new summary op is added to these collections. Defaults to . new_style: Whether to use new style (tensor field) or old style (simple_value field). New style could lead to faster data loading. Returns: A scalar of type . Which contains a protobuf. Raises: ValueError: If tensor has the wrong shape or type.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py", + "ast_data": "FunctionDef name:scalar arg:name arg:tensor arg:collections arg:new_style arg:double_precision arguments arg arg arg arg arg Assign Call Call Compare Assign Call If Assign Call If Assign Call Assign Call Assign Call Return return:yes Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "restore", + "source_code": "def restore(self, restored_tensors, restored_shapes):\n raise ValueError('Calling an abstract method.')", + "docstring": "Restores this object from 'restored_tensors'. Args: restored_tensors: the tensors that were loaded from a checkpoint restored_shapes: the shapes this object should conform to after restore, or None. Returns: An operation that restores the state of the object. Raises: ValueError: If the object cannot be restored using the provided parameters.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object.py", + "ast_data": "FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Raise Call" + }, + { + "library": "cherrypy", + "name": "mount", + "source_code": "def mount(self, root, script_name='', config=None):\n if script_name is None:\n raise TypeError(\"The 'script_name' argument may not be None. Application objects may, however, possess a script_name of None (in order to inpect the WSGI environ for SCRIPT_NAME upon each request). You cannot mount such Applications on this Tree; you must pass them to a WSGI server interface directly.\")\n script_name = script_name.rstrip('/')\n if isinstance(root, Application):\n app = root\n if script_name != '' and script_name != app.script_name:\n raise ValueError('Cannot specify a different script name and pass an Application instance to cherrypy.mount')\n script_name = app.script_name\n else:\n app = Application(root, script_name)\n needs_favicon = script_name == '' and root is not None and (not hasattr(root, 'favicon_ico'))\n if needs_favicon:\n favicon = os.path.join(os.getcwd(), os.path.dirname(__file__), 'favicon.ico')\n root.favicon_ico = tools.staticfile.handler(favicon)\n if config:\n app.merge(config)\n self.apps[script_name] = app\n return app", + "docstring": "Mount a new app from a root object, script_name, and config. root An instance of a \"controller class\" (a collection of page handler methods) which represents the root of the application. This may also be an Application instance, or None if using a dispatcher other than the default. script_name A string containing the \"mount point\" of the application. This should start with a slash, and be the path portion of the URL at which to mount the given root. For example, if root.index() will handle requests to \" then the script_name argument would be \"/dept/app1\". It MUST NOT end in a slash. If the script_name refers to the root of the URI, it MUST be an empty string (not \"/\"). 
config A file or dict containing application config.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cptree.py", + "ast_data": "FunctionDef name:mount arg:self arg:root arg:script_name arg:config arguments arg arg arg arg If Compare Raise Call Assign Call If Call Assign If BoolOp Compare Compare Raise Call Assign Assign Call Assign BoolOp Compare Compare Call If Assign Call Call Call Assign Call If Call Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_forward_pass", + "source_code": "def _forward_pass(self, activations):\n hidden_activation = ACTIVATIONS[self.activation]\n for i in range(self.n_layers_ - 1):\n activations[i + 1] = safe_sparse_dot(activations[i], self.coefs_[i])\n activations[i + 1] += self.intercepts_[i]\n if i + 1 != self.n_layers_ - 1:\n hidden_activation(activations[i + 1])\n output_activation = ACTIVATIONS[self.out_activation_]\n output_activation(activations[i + 1])\n return activations", + "docstring": "Perform a forward pass on the network by computing the values of the neurons in the hidden layers and the output layer. Parameters ---------- activations : list, length = n_layers - 1 The ith element of the list holds the values of the ith layer.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py", + "ast_data": "FunctionDef name:_forward_pass arg:self arg:activations arguments arg arg Assign For Call Assign Call If Compare Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_make_nan_reduction", + "source_code": "def _make_nan_reduction(np_fun_name, reduction, init_val):\n\n @np_utils.np_doc(np_fun_name)\n def nan_reduction(a, axis=None, dtype=None, keepdims=False):\n a = np_array_ops.array(a)\n v = np_array_ops.array(init_val, dtype=a.dtype)\n return reduction(np_array_ops.where(isnan(a), v, a), axis=axis, dtype=dtype, keepdims=keepdims)\n return nan_reduction", + "docstring": "Helper to generate nan* functions.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_math_ops.py", + "ast_data": "FunctionDef name:_make_nan_reduction arg:np_fun_name arg:reduction arg:init_val arguments arg arg arg FunctionDef name:nan_reduction arg:a arg:axis arg:dtype arg:keepdims arguments arg arg arg arg Assign Call Assign Call Return return:yes Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_metrics_by_kernel", + "source_code": "def get_metrics_by_kernel(rows: list[list[str]]) -> list[dict[str, tuple[str, str]]]:\n name_index = {}\n units = rows[1]\n for i, name in enumerate(rows[0]):\n name_index[name] = i\n results = []\n for kernel in rows[2:]:\n values = {}\n for idx, name in enumerate(rows[0]):\n values[name] = (kernel[idx], units[idx])\n results.append(values)\n return results", + "docstring": "Converts ncu-rep table to a dictionary of metrics by kernel. Args: rows: ncu-rep table rows Returns: dictionary of metrics by kernel", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\gpu\\codegen\\tools\\ncu_rep_lib.py", + "ast_data": "FunctionDef name:get_metrics_by_kernel arg:rows arguments arg Assign Assign For Call Assign Assign For Assign For Call Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "depth_from_disparity", + "source_code": "def depth_from_disparity(disparity: Tensor, baseline: float | Tensor, focal: float | Tensor) -> Tensor:\n KORNIA_CHECK_IS_TENSOR(disparity, f'Input disparity type is not a Tensor. 
Got {type(disparity)}.')\n KORNIA_CHECK_SHAPE(disparity, ['*', 'H', 'W'])\n KORNIA_CHECK(isinstance(baseline, (float, Tensor)), f'Input baseline should be either a float or Tensor. Got {type(baseline)}')\n KORNIA_CHECK(isinstance(focal, (float, Tensor)), f'Input focal should be either a float or Tensor. Got {type(focal)}')\n if isinstance(baseline, Tensor):\n KORNIA_CHECK_SHAPE(baseline, ['1'])\n if isinstance(focal, Tensor):\n KORNIA_CHECK_SHAPE(focal, ['1'])\n return baseline * focal / (disparity + 1e-08)", + "docstring": "Compute depth from disparity. Args: disparity: Disparity tensor of shape :math:. baseline: float/tensor containing the distance between the two lenses. focal: float/tensor containing the focal length. Return: Depth map of the shape :math:. Example: >>> disparity = torch.rand(4, 1, 4, 4) >>> baseline = torch.rand(1) >>> focal = torch.rand(1) >>> depth_from_disparity(disparity, baseline, focal).shape torch.Size([4, 1, 4, 4])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\depth.py", + "ast_data": "FunctionDef name:depth_from_disparity arg:disparity arg:baseline arg:focal arguments arg arg arg Call Call Call Call Call Call Call Call Call If Call Call If Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "interval_contains_open", + "source_code": "def interval_contains_open(interval, val):\n a, b = interval\n return a < val < b or a > val > b", + "docstring": "Check, excluding endpoints, whether an interval includes a given value. Parameters ---------- interval : (float, float) The endpoints of the interval. val : float Value to check is within interval. Returns ------- bool Whether *val* is within the *interval*.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:interval_contains_open arg:interval arg:val arguments arg arg Assign Return return:yes BoolOp Compare Compare" + }, + { + "library": "matplotlib", + "name": "draw_markers", + "source_code": "def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):\n for vertices, codes in path.iter_segments(trans, simplify=False):\n if len(vertices):\n x, y = vertices[-2:]\n self.draw_path(gc, marker_path, marker_trans + transforms.Affine2D().translate(x, y), rgbFace)", + "docstring": "Draw a marker at each of *path*'s vertices (excluding control points). The base (fallback) implementation makes multiple calls to . Backends may want to override this method in order to draw the marker only once and reuse it multiple times. Parameters ---------- gc : The graphics context. marker_path : The path for the marker. marker_trans : An affine transform applied to the marker. path : The locations to draw the markers. trans : An affine transform applied to the path. 
rgbFace : :mpltype:, optional", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:draw_markers arg:self arg:gc arg:marker_path arg:marker_trans arg:path arg:trans arg:rgbFace arguments arg arg arg arg arg arg arg For Call If Call Assign Call Call Call" + }, + { + "library": "sphinx", + "name": "visit_ClassDef", + "source_code": "def visit_ClassDef(self, node: ast.ClassDef) -> None:\n self.current_classes.append(node.name)\n self.add_entry(node.name)\n if self.is_final(node.decorator_list):\n self.add_final_entry(node.name)\n self.context.append(node.name)\n self.previous = node\n for child in node.body:\n self.visit(child)\n self.context.pop()\n self.current_classes.pop()", + "docstring": "Handles ClassDef node and set context.", + "type": "method", + "file_path": "sphinx\\sphinx\\pycode\\parser.py", + "ast_data": "FunctionDef name:visit_ClassDef arg:self arg:node arguments arg arg Call Call If Call Call Call Assign For Call Call Call" + }, + { + "library": "pytorch", + "name": "_register_post_backward_reshard_only_hook", + "source_code": "def _register_post_backward_reshard_only_hook(state: _FSDPState, handle: Optional[FlatParamHandle], args: tuple[Any, ...], kwargs: dict[str, Any]) -> None:\n if not torch.is_grad_enabled():\n return\n inp_tensors: Optional[list[torch.Tensor]] = None\n if not handle:\n return\n flat_param = handle.flat_param\n if torch.distributed._functional_collectives.is_torchdynamo_compiling():\n already_registered = hasattr(flat_param, '_post_backward_hook_handle')\n else:\n already_registered = hasattr(flat_param, '_post_backward_hook_state')\n if already_registered or flat_param.requires_grad:\n return\n if inp_tensors is None:\n args_flat = pytree.arg_tree_leaves(*args, **kwargs)\n inp_tensors = [obj for obj in args_flat if torch.is_tensor(obj) and obj.requires_grad]\n assert inp_tensors is not None\n hook_handle = register_multi_grad_hook(inp_tensors, functools.partial(_post_backward_reshard_only_hook, state, handle))\n if torch.distributed._functional_collectives.is_torchdynamo_compiling():\n flat_param._post_backward_hook_handle = hook_handle\n else:\n flat_param._post_backward_hook_state = (hook_handle,)", + "docstring": "Registers post-backward hooks to reshard flat parameters that do not require gradient. 
We register these using multi-post-grad hooks on the input activations to ensure that all gradients that may depend on the parameters have been computed before resharding.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_register_post_backward_reshard_only_hook arg:state arg:handle arg:args arg:kwargs arguments arg arg arg arg If Call Return return:no If Return return:no Assign If Call Assign Call Assign Call If BoolOp Return return:no If Compare Assign Call Assign BoolOp Call Compare Assign Call Call If Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "traceback", + "source_code": "@property\ndef traceback(self):\n return pywrap_tf_session.TF_OperationGetStackTrace(self._c_op)", + "docstring": "Returns the call stack from when this operation was constructed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:traceback arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "ragged_shape", + "source_code": "@dispatch.dispatch_for_api(array_ops.shape)\ndef ragged_shape(input: ragged_tensor.Ragged, name: Optional[str]=None, out_type=dtypes.int32) -> dynamic_ragged_shape.DynamicRaggedShape:\n with ops.name_scope(name, 'RaggedShape', [input]):\n return dynamic_ragged_shape.DynamicRaggedShape.from_tensor(input, out_type)", + "docstring": "Returns the shape of a RaggedTensor. Args: input: A name: A name for the operation (optional). out_type: dtype used to encode the shape. Returns: A", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py", + "ast_data": "FunctionDef name:ragged_shape arg:input arg:name arg:out_type arguments arg arg arg With Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, scale, growth_factor, bucket_count):\n super(ExponentialBuckets, self).__init__(pywrap_tfe.TFE_MonitoringNewExponentialBuckets(scale, growth_factor, bucket_count))", + "docstring": "Creates a new exponential Buckets. Args: scale: float growth_factor: float bucket_count: integer", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:scale arg:growth_factor arg:bucket_count arguments arg arg arg arg Call Call Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, seq):\n self.seq = seq\n self.offset_string = ''", + "docstring": "Set the sequence *seq* of strings that will be used for labels.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:seq arguments arg arg Assign Assign" + }, + { + "library": "pandas", + "name": "validate_indices", + "source_code": "def validate_indices(indices: np.ndarray, n: int) -> None:\n if len(indices):\n min_idx = indices.min()\n if min_idx < -1:\n msg = f\"'indices' contains values less than allowed ({min_idx} < -1)\"\n raise ValueError(msg)\n max_idx = indices.max()\n if max_idx >= n:\n raise IndexError('indices are out-of-bounds')", + "docstring": "Perform bounds-checking for an indexer. -1 is allowed for indicating missing values. Parameters ---------- indices : ndarray n : int Length of the array being indexed. 
Raises ------ ValueError Examples -------- >>> validate_indices(np.array([1, 2]), 3) # OK >>> validate_indices(np.array([1, -2]), 3) Traceback (most recent call last): ... ValueError: negative dimensions are not allowed >>> validate_indices(np.array([1, 2, 3]), 3) Traceback (most recent call last): ... IndexError: indices are out-of-bounds >>> validate_indices(np.array([-1, -1]), 0) # OK >>> validate_indices(np.array([0, 1]), 0) Traceback (most recent call last): ... IndexError: indices are out-of-bounds", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexers\\utils.py", + "ast_data": "FunctionDef name:validate_indices arg:indices arg:n arguments arg arg If Call Assign Call If Compare Assign Raise Call Assign Call If Compare Raise Call" + }, + { + "library": "scipy", + "name": "get_thunk_type_set", + "source_code": "def get_thunk_type_set():\n it_types = []\n i_types = []\n j = 0\n getter_code = ' if (0) {}'\n for I_typenum, I_type in I_TYPES:\n piece = '\\n else if (I_typenum == %(I_typenum)s) {\\n if (T_typenum == -1) { return %(j)s; }'\n getter_code += piece % dict(I_typenum=I_typenum, j=j)\n i_types.append((j, I_typenum, None, I_type, None))\n j += 1\n for T_typenum, T_type in T_TYPES:\n piece = '\\n else if (T_typenum == %(T_typenum)s) { return %(j)s; }'\n getter_code += piece % dict(T_typenum=T_typenum, j=j)\n it_types.append((j, I_typenum, T_typenum, I_type, T_type))\n j += 1\n getter_code += '\\n }'\n return (i_types, it_types, GET_THUNK_CASE_TEMPLATE % dict(content=getter_code))", + "docstring": "Get a list containing cartesian product of data types, plus a getter routine. Returns ------- i_types : list [(j, I_typenum, None, I_type, None), ...] Pairing of index type numbers and the corresponding C++ types, and an unique index . This is for routines that are parameterized only by I but not by T. it_types : list [(j, I_typenum, T_typenum, I_type, T_type), ...] Same as , but for routines parameterized both by T and I. getter_code : str C++ code for a function that takes I_typenum, T_typenum and returns the unique index corresponding to the lists, or -1 if no match was found.", + "type": "function", + "file_path": "scipy\\scipy\\sparse\\_generate_sparsetools.py", + "ast_data": "FunctionDef name:get_thunk_type_set arguments Assign Assign Assign Assign For Assign Call Call For Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "add_audio", + "source_code": "def add_audio(self, tag, snd_tensor, global_step=None, sample_rate=44100, walltime=None):\n torch._C._log_api_usage_once('tensorboard.logging.add_audio')\n self._get_file_writer().add_summary(audio(tag, snd_tensor, sample_rate=sample_rate), global_step, walltime)", + "docstring": "Add audio data to summary. Args: tag (str): Data identifier snd_tensor (torch.Tensor): Sound data global_step (int): Global step value to record sample_rate (int): sample rate in Hz walltime (float): Optional override default walltime (time.time()) seconds after epoch of event Shape: snd_tensor: :math:. 
The values should lie between [-1, 1].", + "type": "method", + "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py", + "ast_data": "FunctionDef name:add_audio arg:self arg:tag arg:snd_tensor arg:global_step arg:sample_rate arg:walltime arguments arg arg arg arg arg arg Call Call Call Call" + }, + { + "library": "virtualenv", + "name": "executables_for_win_pypy_less_v37", + "source_code": "def executables_for_win_pypy_less_v37(self):\n creator = self.describe\n if isinstance(creator, Pypy3Windows) and creator.less_v37:\n for exe in creator.executables(self.interpreter):\n exe.run(creator, self.symlinks)", + "docstring": "PyPy <= 3.6 (v7.3.3) for Windows contains only pypy3.exe and pypy3w.exe Venv does not handle non-existing exe sources, e.g. python.exe, so this patch does it.", + "type": "method", + "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\venv.py", + "ast_data": "FunctionDef name:executables_for_win_pypy_less_v37 arg:self arguments arg Assign If BoolOp Call For Call Call" + }, + { + "library": "pytorch", + "name": "get_root_mesh_dim", + "source_code": "def get_root_mesh_dim(self, device_mesh: 'DeviceMesh') -> Optional[int]:\n root_mesh = self.get_root_mesh(device_mesh)\n child_mesh_dim_names = device_mesh.mesh_dim_names\n if root_mesh and child_mesh_dim_names:\n assert len(child_mesh_dim_names) == 1, 'The submesh can only be a 1D mesh.'\n child_mesh_dim_name = child_mesh_dim_names[0]\n return self.get_mesh_dim_by_name(root_mesh, child_mesh_dim_name)\n return None", + "docstring": "Returns the index of the mesh dim in the root mesh. The device_mesh passed in needs to be sliced out from the root mesh or submesh of the root mesh.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\device_mesh.py", + "ast_data": "FunctionDef name:get_root_mesh_dim arg:self arg:device_mesh arguments arg arg Assign Call Assign If BoolOp Compare Call Assign Return return:yes Call Return return:no" + }, + { + "library": "pytorch", + "name": "BackwardCFunction", + "source_code": "class BackwardCFunction(_C._FunctionBase, FunctionCtx, _HookMixin):\n\n def apply(self, *args):\n backward_fn = self._forward_cls.backward\n vjp_fn = self._forward_cls.vjp\n if backward_fn is not Function.backward and vjp_fn is not Function.vjp:\n raise RuntimeError(\"Implementing both 'backward' and 'vjp' for a custom Function is not allowed. You should only implement one of them.\")\n user_fn = vjp_fn if vjp_fn is not Function.vjp else backward_fn\n return user_fn(self, *args)\n\n def apply_jvp(self, *args):\n return self._forward_cls.jvp(self, *args)\n\n def _compiled_autograd_key(self):\n return self._forward_cls._compiled_autograd_key(self)", + "docstring": "This class is used for internal autograd work. 
Do not use.", + "type": "class", + "file_path": "pytorch\\torch\\autograd\\function.py", + "ast_data": "ClassDef name:BackwardCFunction FunctionDef name:apply arg:self arguments arg arg Assign Assign If BoolOp Compare Compare Raise Call Assign Compare Return return:yes Call FunctionDef name:apply_jvp arg:self arguments arg arg Return return:yes Call FunctionDef name:_compiled_autograd_key arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "track_external", + "source_code": "def track_external(self, *external: Union[nn.Module, optim.Optimizer, torch.Tensor]) -> None:\n flat_external, _ = tree_flatten(external)\n for obj in flat_external:\n if isinstance(obj, torch.Tensor):\n self._update_and_maybe_create_winfos(obj, _MemRefType.OTH)\n elif isinstance(obj, torch.nn.Module):\n self._track_module_params_and_buffers(obj, install_grad_hooks=False)\n elif isinstance(obj, optim.Optimizer):\n self._track_optimizer_states(_MemRefType.OPT, obj)\n elif obj is None:\n continue\n else:\n raise TypeError(f'Object of type {type(obj)} is not supported for tracking. Only stateful objects like modules, optimizers, and tensors are supported.')", + "docstring": "Track tensors and stateful objects like modules, optimizers etc. that are created outside the MemTracker. This method should be called before the ``. Args: *external (Union[nn.Module, optim.Optimizer, torch.Tensor]): The external modules, optimizers, and tensors to be tracked.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py", + "ast_data": "FunctionDef name:track_external arg:self arguments arg arg Assign Call For If Call Call If Call Call If Call Call If Compare Raise Call Call" + }, + { + "library": "tensorflow", + "name": "_add_weight", + "source_code": "def _add_weight(self, name, initial_value, dtype=None):\n variable = variable_v1.VariableV1(initial_value=initial_value, name=name, dtype=dtype, trainable=False, use_resource=True, synchronization=variables.VariableSynchronization.AUTO, aggregation=variables.VariableAggregation.NONE)\n if context.executing_eagerly():\n graph_key = None\n else:\n graph = ops.get_default_graph()\n graph_key = graph._graph_key\n key = (name, graph_key)\n self._weights[key] = variable\n self._handle_deferred_dependencies(name=name, trackable=variable)\n backend.track_variable(variable)\n return variable", + "docstring": "Adds a weight to this loss scale. Args: name: Variable name. initial_value: The variable's initial value. dtype: The type of the variable. Returns: A variable. Raises: RuntimeError: If a weight with has already been added.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", + "ast_data": "FunctionDef name:_add_weight arg:self arg:name arg:initial_value arg:dtype arguments arg arg arg arg Assign Call If Call Assign Assign Call Assign Assign Assign Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "_no_match", + "source_code": "def _no_match(dm: Tensor) -> Tuple[Tensor, Tensor]:\n dists = torch.empty(0, 1, device=dm.device, dtype=dm.dtype)\n idxs = torch.empty(0, 2, device=dm.device, dtype=torch.long)\n return (dists, idxs)", + "docstring": "Output empty tensors. Returns: - Descriptor distance of matching descriptors, shape of :math:. 
- Long tensor indexes of matching descriptors in desc1 and desc2, shape of :math:.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\matching.py", + "ast_data": "FunctionDef name:_no_match arg:dm arguments arg Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "split_arg_into_blocks", + "source_code": "def split_arg_into_blocks(block_dims, block_dims_fn, arg, axis=-1):\n block_sizes = [dim.value for dim in block_dims]\n if any((d is None for d in block_sizes)):\n block_sizes = block_dims_fn()\n return array_ops.split(arg, block_sizes, axis=axis)", + "docstring": "Split into blocks matching 's . Specifically, if we have a blockwise lower-triangular matrix, with block sizes along the diagonal , this method splits on into tensors, whose shape at is . Args: block_dims: Iterable of . block_dims_fn: Callable returning an iterable of s. arg: . is split into tensors. axis: Python representing the axis to split on. Returns: A list of s.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py", + "ast_data": "FunctionDef name:split_arg_into_blocks arg:block_dims arg:block_dims_fn arg:arg arg:axis arguments arg arg arg arg Assign If Call Compare Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "__init__", + "source_code": "def __init__(self, verbose_name=None, srid=4326, spatial_index=True, **kwargs):\n self.spatial_index = spatial_index\n self.srid = srid\n kwargs['verbose_name'] = verbose_name\n super().__init__(**kwargs)", + "docstring": "The initialization function for base spatial fields. Takes the following as keyword arguments: srid: The spatial reference system identifier, an OGC standard. Defaults to 4326 (WGS84). spatial_index: Indicates whether to create a spatial index. Defaults to True. Set this instead of 'db_index' for geographic fields since index creation is different for geometry columns.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\models\\fields.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:verbose_name arg:srid arg:spatial_index arguments arg arg arg arg arg Assign Assign Assign Call Call" + }, + { + "library": "tensorflow", + "name": "erase", + "source_code": "def erase(self, keys, name=None):\n if keys.dtype != self._key_dtype:\n raise TypeError('Signature mismatch. Keys must be dtype %s, got %s.' % (self._key_dtype, keys.dtype))\n with ops.name_scope(name, '%s_lookup_table_remove' % self.name, (self.resource_handle, keys, self._default_value)):\n op = gen_lookup_ops.lookup_table_remove_v2(self.resource_handle, keys)\n return op", + "docstring": "Removes and its associated values from the table. If a key is not present in the table, it is silently ignored. Args: keys: Keys to remove. Can be a tensor of any shape. Must match the table's key type. name: A name for the operation (optional). Returns: The created Operation. Raises: TypeError: when do not match the table data types.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:erase arg:self arg:keys arg:name arguments arg arg arg If Compare Raise Call With Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_joinstyle", + "source_code": "@_docstring.interpd\ndef set_joinstyle(self, js):\n self._joinstyle = JoinStyle(js)", + "docstring": "Set the for the collection (for all its elements). 
Parameters ---------- js : or %(JoinStyle)s", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:set_joinstyle arg:self arg:js arguments arg arg Assign Call" + }, + { + "library": "scipy", + "name": "dctn", + "source_code": "def dctn(x, type=2, shape=None, axes=None, norm=None, overwrite_x=False):\n shape = _good_shape(x, shape, axes)\n return _pocketfft.dctn(x, type, shape, axes, norm, overwrite_x)", + "docstring": "Return multidimensional Discrete Cosine Transform along the specified axes. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DCT (see Notes). Default type is 2. shape : int or array_like of ints or None, optional The shape of the result. If both and (see below) are None, is `shapeaxesshape`shape[i] >> import numpy as np >>> from scipy.fftpack import dctn, idctn >>> rng = np.random.default_rng() >>> y = rng.standard_normal((16, 16)) >>> np.allclose(y, idctn(dctn(y, norm='ortho'), norm='ortho')) True", + "type": "function", + "file_path": "scipy\\scipy\\fftpack\\_realtransforms.py", + "ast_data": "FunctionDef name:dctn arg:x arg:type arg:shape arg:axes arg:norm arg:overwrite_x arguments arg arg arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "KORNIA_CHECK_SAME_DEVICES", + "source_code": "def KORNIA_CHECK_SAME_DEVICES(tensors: list[Tensor], msg: Optional[str]=None, raises: bool=True) -> bool:\n KORNIA_CHECK(isinstance(tensors, list) and len(tensors) >= 1, 'Expected a list with at least one element', raises)\n if not all((tensors[0].device == x.device for x in tensors)):\n if raises:\n raise Exception(f'Not same device for tensors. Got: {[x.device for x in tensors]}.\\n{msg}')\n return False\n return True", + "docstring": "Check whether a list provided tensors live in the same device. Args: tensors: a list of tensors. msg: message to show in the exception. raises: bool indicating whether an exception should be raised upon failure. Raises: Exception: if all the tensors are not in the same device and raises is True. Example: >>> x1 = torch.rand(2, 3, 3) >>> x2 = torch.rand(1, 3, 1) >>> KORNIA_CHECK_SAME_DEVICES([x1, x2], \"Tensors not in the same device\") True", + "type": "function", + "file_path": "kornia\\kornia\\core\\check.py", + "ast_data": "FunctionDef name:KORNIA_CHECK_SAME_DEVICES arg:tensors arg:msg arg:raises arguments arg arg arg Call BoolOp Call Compare Call If Call Compare If Raise Call Return return:yes Return return:yes" + }, + { + "library": "pygame", + "name": "get_italic", + "source_code": "def get_italic(self):\n return self.oblique", + "docstring": "get_italic() -> bool check if the text will be rendered italic", + "type": "method", + "file_path": "pygame\\src_py\\ftfont.py", + "ast_data": "FunctionDef name:get_italic arg:self arguments arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "score", + "source_code": "@available_if(_estimator_has('score'))\ndef score(self, X, y, **params):\n check_is_fitted(self)\n _raise_for_params(params, self, 'score')\n if _routing_enabled():\n routed_params = process_routing(self, 'score', **params)\n else:\n routed_params = Bunch(estimator=Bunch(score={}))\n X = validate_data(self, X, accept_sparse=True, ensure_all_finite=False, reset=False)\n return self.estimator_.score(X, y, **routed_params.estimator.score)", + "docstring": "Call score on the . 
Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array representing the data. y : array-like of shape (n_samples,) Array representing the labels. **params : dict of str -> object Parameters to pass to the underlying estimator's `enable_metadata_routing=TrueMetadata Routing User Guide estimator`.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\semi_supervised\\_self_training.py", + "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg arg Call Call If Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_try_run_local_init_op", + "source_code": "def _try_run_local_init_op(self, sess: session.Session) -> Tuple[bool, Optional[str]]:\n if self._local_init_op is not None:\n is_ready_for_local_init, msg = self._model_ready_for_local_init(sess)\n if is_ready_for_local_init:\n logging.info('Running local_init_op.')\n sess.run(self._local_init_op, feed_dict=self._local_init_feed_dict, options=self._local_init_run_options)\n logging.info('Done running local_init_op.')\n return (True, None)\n else:\n return (False, msg)\n return (True, None)", + "docstring": "Tries to run _local_init_op, if not None, and is ready for local init. Args: sess: A . Returns: A tuple (is_successful, msg), where is_successful is True if _local_init_op is None, or we ran _local_init_op, and False otherwise; and msg is a with the reason why the model was not ready to run local init.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\session_manager.py", + "ast_data": "FunctionDef name:_try_run_local_init_op arg:self arg:sess arguments arg arg If Compare Assign Call If Call Call Call Return return:yes Return return:yes Return return:yes" + }, + { + "library": "pandas", + "name": "compute_list_like", + "source_code": "def compute_list_like(self, op_name: Literal['agg', 'apply'], selected_obj: Series | DataFrame, kwargs: dict[str, Any]) -> tuple[list[Hashable] | Index, list[Any]]:\n func = cast(list[AggFuncTypeBase], self.func)\n obj = self.obj\n results = []\n keys = []\n if selected_obj.ndim == 1:\n for a in func:\n colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)\n args = [self.axis, *self.args] if include_axis(op_name, colg) else self.args\n new_res = getattr(colg, op_name)(a, *args, **kwargs)\n results.append(new_res)\n name = com.get_callable_name(a) or a\n keys.append(name)\n else:\n indices = []\n for index, col in enumerate(selected_obj):\n colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])\n args = [self.axis, *self.args] if include_axis(op_name, colg) else self.args\n new_res = getattr(colg, op_name)(func, *args, **kwargs)\n results.append(new_res)\n indices.append(index)\n keys = selected_obj.columns.take(indices)\n return (keys, results)", + "docstring": "Compute agg/apply results for like-like input. Parameters ---------- op_name : {\"agg\", \"apply\"} Operation being performed. selected_obj : Series or DataFrame Data to perform operation on. kwargs : dict Keyword arguments to pass to the functions. Returns ------- keys : list[Hashable] or Index Index labels for result. results : list Data for result. 
When aggregating with a Series, this can contain any Python objects.", + "type": "method", + "file_path": "pandas\\pandas\\core\\apply.py", + "ast_data": "FunctionDef name:compute_list_like arg:self arg:op_name arg:selected_obj arg:kwargs arguments arg arg arg arg Assign Call Assign Assign Assign If Compare For Assign Call Assign Call Assign Call Call Call Assign BoolOp Call Call Assign For Call Assign Call Assign Call Assign Call Call Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "alias_inplace_add", + "source_code": "@deprecation.deprecated(None, 'Prefer tf.tensor_scatter_nd_add, which offers the same functionality with well-defined read-write semantics.')\ndef alias_inplace_add(x, i, v):\n return _inplace_helper(x, i, v, gen_array_ops.inplace_add)", + "docstring": "Applies an inplace add on input x at index i with value v. Aliases x. If i is None, x and v must be the same shape. Computes x += v; If i is a scalar, x has a rank 1 higher than v's. Computes x[i, :] += v; Otherwise, x and v must have the same rank. Computes x[i, :] += v; Args: x: A Tensor. i: None, a scalar or a vector. v: A Tensor. Returns: Returns x.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\inplace_ops.py", + "ast_data": "FunctionDef name:alias_inplace_add arg:x arg:i arg:v arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_test_step_fn", + "source_code": "def _test_step_fn(inputs):\n if isinstance(inputs, (tuple, list)) and len(inputs) == 2:\n inputs, targets = inputs\n else:\n targets = None\n distribute_lib.get_replica_context().merge_call(_build_model, args=(model, mode, inputs, targets))\n _, outputs, updates, _ = _per_replica_execution_function(dist_utils.get_distributed_model(model, mode), mode)\n with ops.control_dependencies([updates]):\n return [array_ops.identity(out) for out in outputs]", + "docstring": "A fn that returns output of single test step.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_distributed_v1.py", + "ast_data": "FunctionDef name:_test_step_fn arg:inputs arguments arg If BoolOp Call Compare Call Assign Assign Call Call Assign Call Call With Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_breakdown_point", + "source_code": "def _breakdown_point(n_samples, n_subsamples):\n return 1 - (0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1) + n_subsamples - 1) / n_samples", + "docstring": "Approximation of the breakdown point. Parameters ---------- n_samples : int Number of samples. n_subsamples : int Number of subsamples to consider. Returns ------- breakdown_point : float Approximation of breakdown point.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\linear_model\\_theil_sen.py", + "ast_data": "FunctionDef name:_breakdown_point arg:n_samples arg:n_subsamples arguments arg arg Return return:yes" + }, + { + "library": "scikit-learn", + "name": "staged_predict_proba", + "source_code": "def staged_predict_proba(self, X):\n for raw_predictions in self._staged_raw_predict(X):\n yield self._loss.predict_proba(raw_predictions)", + "docstring": "Predict class probabilities at each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. 
Yields ------ y : generator of ndarray of shape (n_samples,) The predicted class probabilities of the input samples, for each iteration.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", + "ast_data": "FunctionDef name:staged_predict_proba arg:self arg:X arguments arg arg For Call Call" + }, + { + "library": "tensorflow", + "name": "_registered_kl", + "source_code": "def _registered_kl(type_a, type_b):\n hierarchy_a = tf_inspect.getmro(type_a)\n hierarchy_b = tf_inspect.getmro(type_b)\n dist_to_children = None\n kl_fn = None\n for mro_to_a, parent_a in enumerate(hierarchy_a):\n for mro_to_b, parent_b in enumerate(hierarchy_b):\n candidate_dist = mro_to_a + mro_to_b\n candidate_kl_fn = _DIVERGENCES.get((parent_a, parent_b), None)\n if not kl_fn or (candidate_kl_fn and candidate_dist < dist_to_children):\n dist_to_children = candidate_dist\n kl_fn = candidate_kl_fn\n return kl_fn", + "docstring": "Get the KL function registered for classes a and b.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\kullback_leibler.py", + "ast_data": "FunctionDef name:_registered_kl arg:type_a arg:type_b arguments arg arg Assign Call Assign Call Assign Assign For Call For Call Assign Assign Call If BoolOp BoolOp Compare Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_write_graph", + "source_code": "def _write_graph(self):\n assert self._is_chief\n if self._logdir:\n training_util.write_graph(self._graph.as_graph_def(add_shapes=True), self._logdir, 'graph.pbtxt')\n if self._summary_writer and (not self._graph_added_to_summary):\n self._summary_writer.add_graph(self._graph)\n self._summary_writer.add_meta_graph(self._meta_graph_def)\n self._graph_added_to_summary = True", + "docstring": "Writes graph_def to and adds it to summary if applicable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", + "ast_data": "FunctionDef name:_write_graph arg:self arguments arg If Call Call If BoolOp Call Call Assign" + }, + { + "library": "scikit-learn", + "name": "get_metadata_routing", + "source_code": "def get_metadata_routing(self):\n router = MetadataRouter(owner=self.__class__.__name__).add(estimator=self.estimator, method_mapping=MethodMapping().add(caller='partial_fit', callee='partial_fit').add(caller='fit', callee='fit'))\n return router", + "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.4 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_selection\\_from_model.py", + "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "nunique", + "source_code": "def nunique(self, axis: Axis=0, dropna: bool=True) -> Series:\n return self.apply(Series.nunique, axis=axis, dropna=dropna)", + "docstring": "Count number of distinct elements in specified axis. Return Series with number of distinct elements. Can ignore NaN values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. dropna : bool, default True Don't include NaN in the counts. Returns ------- Series Series with counts of unique values per row or column, depending on . 
See Also -------- Series.nunique: Method nunique for Series. DataFrame.count: Count non-NA cells for each column or row. Examples -------- >>> df = pd.DataFrame({\"A\": [4, 5, 6], \"B\": [4, 1, 1]}) >>> df.nunique() A 3 B 2 dtype: int64 >>> df.nunique(axis=1) 0 1 1 2 2 2 dtype: int64", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:nunique arg:self arg:axis arg:dropna arguments arg arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "draw_mathtext", + "source_code": "def draw_mathtext(self, gc, x, y, s, prop, angle):\n ox, oy, width, height, descent, font_image = self.mathtext_parser.parse(s, self.dpi, prop, antialiased=gc.get_antialiased())\n xd = descent * sin(radians(angle))\n yd = descent * cos(radians(angle))\n x = round(x + ox + xd)\n y = round(y - oy + yd)\n self._renderer.draw_text_image(font_image, x, y + 1, angle, gc)", + "docstring": "Draw mathtext using :mod:.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_agg.py", + "ast_data": "FunctionDef name:draw_mathtext arg:self arg:gc arg:x arg:y arg:s arg:prop arg:angle arguments arg arg arg arg arg arg arg Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Call" + }, + { + "library": "numpy", + "name": "poly2herme", + "source_code": "def poly2herme(pol):\n [pol] = pu.as_series([pol])\n deg = len(pol) - 1\n res = 0\n for i in range(deg, -1, -1):\n res = hermeadd(hermemulx(res), pol[i])\n return res", + "docstring": "poly2herme(pol) Convert a polynomial to a Hermite series. Convert an array representing the coefficients of a polynomial (relative to the \"standard\" basis) ordered from lowest degree to highest, to an array of the coefficients of the equivalent Hermite series, ordered from lowest to highest degree. Parameters ---------- pol : array_like 1-D array containing the polynomial coefficients Returns ------- c : ndarray 1-D array containing the coefficients of the equivalent Hermite series. See Also -------- herme2poly Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. 
Examples -------- >>> import numpy as np >>> from numpy.polynomial.hermite_e import poly2herme >>> poly2herme(np.arange(4)) array([ 2., 10., 2., 3.])", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", + "ast_data": "FunctionDef name:poly2herme arg:pol arguments arg Assign Call Assign Call Assign For Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "test_name", + "source_code": "def test_name(self, **kargs):\n skip_key_list = ['device']\n test_name_str = []\n for key in kargs:\n value = kargs[key]\n test_name_str.append(('' if key in skip_key_list else key) + str(value if type(value) != bool else int(value)))\n name = (self.module_name() + '_' + '_'.join(test_name_str)).replace(' ', '')\n return name", + "docstring": "this is a globally unique name which can be used to label a specific test", + "type": "method", + "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_pytorch.py", + "ast_data": "FunctionDef name:test_name arg:self arguments arg arg Assign Assign For Assign Call Compare Call Compare Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "scrapy", + "name": "_send_report", + "source_code": "def _send_report(self, rcpts: list[str], subject: str) -> None:\n assert self.crawler.engine\n assert self.crawler.stats\n stats = self.crawler.stats\n s = f'Memory usage at engine startup : {stats.get_value('memusage/startup') / 1024 / 1024}M\\r\\n'\n s += f'Maximum memory usage : {stats.get_value('memusage/max') / 1024 / 1024}M\\r\\n'\n s += f'Current memory usage : {self.get_virtual_size() / 1024 / 1024}M\\r\\n'\n s += 'ENGINE STATUS ------------------------------------------------------- \\r\\n'\n s += '\\r\\n'\n s += pformat(get_engine_status(self.crawler.engine))\n s += '\\r\\n'\n self.mail.send(rcpts, subject, s)", + "docstring": "send notification mail with some additional useful info", + "type": "method", + "file_path": "scrapy\\scrapy\\extensions\\memusage.py", + "ast_data": "FunctionDef name:_send_report arg:self arg:rcpts arg:subject arguments arg arg arg Assign Assign Call Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "in_load_context", + "source_code": "def in_load_context():\n return _load_context.in_load_context()", + "docstring": "Returns whether under a load context.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load_context.py", + "ast_data": "FunctionDef name:in_load_context arguments Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "__lt__", + "source_code": "def __lt__(self, other_node):\n return self.split_info.gain > other_node.split_info.gain", + "docstring": "Comparison for priority queue. Nodes with high gain are higher priority than nodes with low gain. heapq.heappush only need the '<' operator. heapq.heappop take the smallest item first (smaller is higher priority). 
Parameters ---------- other_node : TreeNode The node to compare with.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py", + "ast_data": "FunctionDef name:__lt__ arg:self arg:other_node arguments arg arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "total_count", + "source_code": "@property\ndef total_count(self):\n return self._total_count", + "docstring": "Number of trials used to construct a sample.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet_multinomial.py", + "ast_data": "FunctionDef name:total_count arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "limit_range_for_scale", + "source_code": "def limit_range_for_scale(self, vmin, vmax, minpos):\n return (vmin, vmax)", + "docstring": "Return the range *vmin*, *vmax*, restricted to the domain supported by this scale (if any). *minpos* should be the minimum positive value in the data. This is used by log scales to determine a minimum value.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\scale.py", + "ast_data": "FunctionDef name:limit_range_for_scale arg:self arg:vmin arg:vmax arg:minpos arguments arg arg arg arg Return return:yes" + }, + { + "library": "kornia", + "name": "UnsharpMask", + "source_code": "class UnsharpMask(Module):\n\n def __init__(self, kernel_size: tuple[int, int] | int, sigma: tuple[float, float] | Tensor, border_type: str='reflect') -> None:\n super().__init__()\n self.kernel_size = kernel_size\n self.sigma = sigma\n self.border_type = border_type\n\n def forward(self, input: Tensor) -> Tensor:\n return unsharp_mask(input, self.kernel_size, self.sigma, self.border_type)", + "docstring": "Create an operator that sharpens image with: out = 2 * image - gaussian_blur2d(image). Args: kernel_size: the size of the kernel. sigma: the standard deviation of the kernel. border_type: the padding mode to be applied before convolving. The expected modes are: ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Shape: - Input: (B, C, H, W) - Output: (B, C, H, W). Examples: >>> input = torch.rand(2, 4, 5, 5) >>> sharpen = UnsharpMask((3, 3), (1.5, 1.5)) >>> output = sharpen(input) >>> output.shape torch.Size([2, 4, 5, 5])", + "type": "class", + "file_path": "kornia\\kornia\\filters\\unsharp.py", + "ast_data": "ClassDef name:UnsharpMask FunctionDef name:__init__ arg:self arg:kernel_size arg:sigma arg:border_type arguments arg arg arg arg Call Call Assign Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "enabled", + "source_code": "def enabled():\n return False", + "docstring": "Returns true if TFRT should be enabled.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\tfrt_utils.py", + "ast_data": "FunctionDef name:enabled arguments Return return:yes" + }, + { + "library": "tensorflow", + "name": "element_spec", + "source_code": "@property\ndef element_spec(self):\n return self._element_spec", + "docstring": "The type specification of an element of this iterator. 
For more information, read [this guide]( Returns: A (nested) structure of objects matching the structure of an element of this iterator and specifying the type of individual components.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py", + "ast_data": "FunctionDef name:element_spec arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "softplus", + "source_code": "@dispatch.add_dispatch_support\ndef softplus(x):\n return math_ops.softplus(x)", + "docstring": "Softplus activation function, . Example Usage: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.softplus(a) >>> b.numpy() array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00, 2.0000000e+01], dtype=float32) Args: x: Input tensor. Returns: The softplus activation: .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py", + "ast_data": "FunctionDef name:softplus arg:x arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_gaussian_random_matrix", + "source_code": "def _gaussian_random_matrix(n_components, n_features, random_state=None):\n _check_input_size(n_components, n_features)\n rng = check_random_state(random_state)\n components = rng.normal(loc=0.0, scale=1.0 / np.sqrt(n_components), size=(n_components, n_features))\n return components", + "docstring": "Generate a dense Gaussian random matrix. The components of the random matrix are drawn from N(0, 1.0 / n_components). Read more in the :ref:. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. random_state : int, RandomState instance or None, default=None Controls the pseudo random number generator used to generate the matrix at fit time. Pass an int for reproducible output across multiple function calls. See :term:. Returns ------- components : ndarray of shape (n_components, n_features) The generated Gaussian random matrix. See Also -------- GaussianRandomProjection", + "type": "function", + "file_path": "scikit-learn\\sklearn\\random_projection.py", + "ast_data": "FunctionDef name:_gaussian_random_matrix arg:n_components arg:n_features arg:random_state arguments arg arg arg Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_force_original_view_tracking", + "source_code": "class _force_original_view_tracking(_DecoratorContextManager):\n\n def __init__(self, mode: bool) -> None:\n self.prev = torch._C._is_view_replay_enabled()\n torch._C._set_view_replay_enabled(mode)\n self.mode = mode\n\n def __enter__(self) -> None:\n pass\n\n def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:\n torch._C._set_view_replay_enabled(self.prev)\n\n def clone(self):\n return self.__class__(self.mode)", + "docstring": "Context-manager that sets whether or not to always enable view-replay in autograd. 
`mode`).", + "type": "class", + "file_path": "pytorch\\torch\\autograd\\grad_mode.py", + "ast_data": "ClassDef name:_force_original_view_tracking FunctionDef name:__init__ arg:self arg:mode arguments arg arg Assign Call Call Assign FunctionDef name:__enter__ arg:self arguments arg FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg Call FunctionDef name:clone arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "compress", + "source_code": "def compress(self, data_list):\n raise NotImplementedError('Subclasses must implement this method.')", + "docstring": "Return a single value for the given list of values. The values can be assumed to be valid. For example, if this MultiValueField was instantiated with fields=(DateField(), TimeField()), this might return a datetime object created by combining the date and time in data_list.", + "type": "method", + "file_path": "django\\django\\forms\\fields.py", + "ast_data": "FunctionDef name:compress arg:self arg:data_list arguments arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "_get_config", + "source_code": "def _get_config(numels: dict[str, int]) -> dict[str, int]:\n return {prefix.upper() + 'BLOCK': numel for prefix, numel in numels.items()}", + "docstring": "Convert numels (\"x\", \"r0_\", etc.) to block sizes (\"XBLOCK\", \"R0_BLOCK\"), etc.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py", + "ast_data": "FunctionDef name:_get_config arg:numels arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_cast_transformer", + "source_code": "def _cast_transformer(parent, node, full_name, name, logs):\n dtype_str = name[3:]\n if dtype_str == 'float':\n dtype_str = 'float32'\n elif dtype_str == 'double':\n dtype_str = 'float64'\n new_arg = ast.keyword(arg='dtype', value=ast.Attribute(value=ast.Name(id='tf', ctx=ast.Load()), attr=dtype_str, ctx=ast.Load()))\n if len(node.args) == 2:\n name_arg = ast.keyword(arg='name', value=node.args[-1])\n node.args = node.args[:-1]\n node.keywords.append(name_arg)\n new_arg.value.lineno = node.lineno\n new_arg.value.col_offset = node.col_offset + 100\n node.keywords.append(new_arg)\n if isinstance(node.func, ast.Attribute):\n node.func.attr = 'cast'\n else:\n assert isinstance(node.func, ast.Name)\n node.func.id = 'cast'\n logs.append((ast_edits.INFO, node.lineno, node.col_offset, 'Changed %s call to tf.cast(..., dtype=tf.%s).' 
% (full_name, dtype_str)))\n return node", + "docstring": "Transforms to_int and to_float to cast(..., dtype=...).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\tf_upgrade_v2.py", + "ast_data": "FunctionDef name:_cast_transformer arg:parent arg:node arg:full_name arg:name arg:logs arguments arg arg arg arg arg Assign If Compare Assign If Compare Assign Assign Call Call Call Call Call If Compare Call Assign Call Assign Call Assign Assign Call If Call Assign Call Assign Call Return return:yes" + }, + { + "library": "sphinx", + "name": "parser", + "source_code": "@property\ndef parser(self) -> Parser:\n if (parser := self.current_document._parser) is not None:\n return parser\n msg = 'parser'\n raise KeyError(msg)", + "docstring": "Returns the parser being used for to parse the current document.", + "type": "method", + "file_path": "sphinx\\sphinx\\environment\\__init__.py", + "ast_data": "FunctionDef name:parser arg:self arguments arg If Compare Return return:yes Assign Raise Call" + }, + { + "library": "tensorflow", + "name": "add_tensor_filter", + "source_code": "def add_tensor_filter(self, filter_name, filter_callable):\n if not isinstance(filter_name, str):\n raise TypeError('Input argument filter_name is expected to be str, but is not.')\n if not filter_name:\n raise ValueError('Input argument filter_name cannot be empty.')\n if not callable(filter_callable):\n raise TypeError('Input argument filter_callable is expected to be callable, but is not.')\n self._tensor_filters[filter_name] = filter_callable", + "docstring": "Add a tensor filter. A tensor filter is a named callable of the signature: filter_callable(dump_datum, tensor), wherein dump_datum is an instance of debug_data.DebugTensorDatum carrying metadata about the dumped tensor, including tensor name, timestamps, etc. tensor is the value of the dumped tensor as an numpy.ndarray object. The return value of the function is a bool. This is the same signature as the input argument to debug_data.DebugDumpDir.find(). Args: filter_name: (str) name of the filter. Cannot be empty. filter_callable: (callable) a filter function of the signature described as above. Raises: ValueError: If filter_name is an empty str. TypeError: If filter_name is not a str. Or if filter_callable is not callable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py", + "ast_data": "FunctionDef name:add_tensor_filter arg:self arg:filter_name arg:filter_callable arguments arg arg arg If Call Raise Call If Raise Call If Call Raise Call Assign" + }, + { + "library": "scikit-learn", + "name": "make_spd_matrix", + "source_code": "@validate_params({'n_dim': [Interval(Integral, 1, None, closed='left')], 'random_state': ['random_state']}, prefer_skip_nested_validation=True)\ndef make_spd_matrix(n_dim, *, random_state=None):\n generator = check_random_state(random_state)\n A = generator.uniform(size=(n_dim, n_dim))\n U, _, Vt = linalg.svd(np.dot(A.T, A), check_finite=False)\n X = np.dot(np.dot(U, 1.0 + np.diag(generator.uniform(size=n_dim))), Vt)\n return X", + "docstring": "Generate a random symmetric, positive-definite matrix. Read more in the :ref:. Parameters ---------- n_dim : int The matrix dimension. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:. 
Returns ------- X : ndarray of shape (n_dim, n_dim) The random symmetric, positive-definite matrix. See Also -------- make_sparse_spd_matrix: Generate a sparse symmetric definite positive matrix. Examples -------- >>> from sklearn.datasets import make_spd_matrix >>> make_spd_matrix(n_dim=2, random_state=42) array([[2.093, 0.346], [0.346, 0.218]])", + "type": "function", + "file_path": "scikit-learn\\sklearn\\datasets\\_samples_generator.py", + "ast_data": "FunctionDef name:make_spd_matrix arg:n_dim arguments arg arg Assign Call Assign Call Assign Call Call Assign Call Call Call Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "visit", + "source_code": "@classmethod\ndef visit(cls, fn: Callable[['VariableTracker'], None], value: Any, cache: Optional[dict[int, Any]]=None) -> None:\n if cache is None:\n cache = {}\n idx = id(value)\n if idx in cache:\n return\n cache[idx] = value\n if isinstance(value, VariableTracker):\n value = value.unwrap()\n fn(value)\n value = value.unwrap()\n nonvars = value._nonvar_fields\n for key, subvalue in value.__dict__.items():\n if key not in nonvars:\n cls.visit(fn, subvalue, cache)\n elif istype(value, (list, tuple)):\n for subvalue in value:\n cls.visit(fn, subvalue, cache)\n elif istype(value, (dict, collections.OrderedDict)):\n for subvalue in value.values():\n cls.visit(fn, subvalue, cache)", + "docstring": "Walk value and call fn on all the VariableTracker instances", + "type": "method", + "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py", + "ast_data": "FunctionDef name:visit arg:cls arg:fn arg:value arg:cache arguments arg arg arg arg If Compare Assign Assign Call If Compare Return return:no Assign If Call Assign Call Call Assign Call Assign For Call If Compare Call If Call For Call If Call For Call Call" + }, + { + "library": "pytorch", + "name": "get_cpp_namespace", + "source_code": "def get_cpp_namespace(self, default: str='') -> str:\n return self.cpp_namespace_ if self.cpp_namespace_ else default", + "docstring": "Return the namespace string from joining all the namespaces by \"::\" (hence no leading \"::\"). 
Return default if namespace string is empty.", + "type": "method", + "file_path": "pytorch\\torchgen\\utils.py", + "ast_data": "FunctionDef name:get_cpp_namespace arg:self arg:default arguments arg arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_broadcasting_gather", + "source_code": "def _broadcasting_gather(x, i):\n static_first_dim = tensor_shape.dimension_value(x.shape[0])\n if static_first_dim == 1:\n i = 0\n elif static_first_dim is None:\n i = array_ops.where_v2(array_ops.shape(x)[0] > 1, i, 0)\n result = array_ops.gather(x, i)\n return result", + "docstring": "Wrapper for gather that implicitly broadcasts unit dimensions.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\control_flow_ops.py", + "ast_data": "FunctionDef name:_broadcasting_gather arg:x arg:i arguments arg arg Assign Call If Compare Assign If Compare Assign Call Compare Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "ConcatPair", + "source_code": "class ConcatPair(Func):\n function = 'CONCAT'\n\n def pipes_concat_sql(self, compiler, connection, **extra_context):\n coalesced = self.coalesce()\n return super(ConcatPair, coalesced).as_sql(compiler, connection, template='(%(expressions)s)', arg_joiner=' || ', **extra_context)\n as_sqlite = pipes_concat_sql\n\n def as_postgresql(self, compiler, connection, **extra_context):\n c = self.copy()\n c.set_source_expressions([expression if isinstance(expression.output_field, (CharField, TextField)) else Cast(expression, TextField()) for expression in c.get_source_expressions()])\n return c.pipes_concat_sql(compiler, connection, **extra_context)\n\n def as_mysql(self, compiler, connection, **extra_context):\n return super().as_sql(compiler, connection, function='CONCAT_WS', template=\"%(function)s('', %(expressions)s)\", **extra_context)\n\n def coalesce(self):\n c = self.copy()\n c.set_source_expressions([Coalesce(expression, Value('')) for expression in c.get_source_expressions()])\n return c", + "docstring": "Concatenate two arguments together. 
This is used by because not all backend databases support more than two arguments.", + "type": "class", + "file_path": "django\\django\\db\\models\\functions\\text.py", + "ast_data": "ClassDef name:ConcatPair Assign FunctionDef name:pipes_concat_sql arg:self arg:compiler arg:connection arguments arg arg arg arg Assign Call Return return:yes Call Call Assign FunctionDef name:as_postgresql arg:self arg:compiler arg:connection arguments arg arg arg arg Assign Call Call Call Call Call Call Return return:yes Call FunctionDef name:as_mysql arg:self arg:compiler arg:connection arguments arg arg arg arg Return return:yes Call Call FunctionDef name:coalesce arg:self arguments arg Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "is_usable_for", + "source_code": "@classmethod\ndef is_usable_for(cls, X, Y, metric) -> bool:\n if issparse(X) and issparse(Y) and isinstance(metric, str) and ('euclidean' in metric):\n return False\n\n def is_numpy_c_ordered(X):\n return hasattr(X, 'flags') and getattr(X.flags, 'c_contiguous', False)\n\n def is_valid_sparse_matrix(X):\n return issparse(X) and X.format == 'csr' and (X.nnz > 0) and (X.indices.dtype == X.indptr.dtype == np.int32)\n is_usable = get_config().get('enable_cython_pairwise_dist', True) and (is_numpy_c_ordered(X) or is_valid_sparse_matrix(X)) and (is_numpy_c_ordered(Y) or is_valid_sparse_matrix(Y)) and (X.dtype == Y.dtype) and (X.dtype in (np.float32, np.float64)) and (metric in cls.valid_metrics() or isinstance(metric, DistanceMetric))\n return is_usable", + "docstring": "Return True if the dispatcher can be used for the given parameters. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples_X, n_features) Input data. Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features) Input data. metric : str, default='euclidean' The distance metric to use. For a list of available metrics, see the documentation of :class:. Returns ------- True if the dispatcher can be used, else False.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\metrics\\_pairwise_distances_reduction\\_dispatcher.py", + "ast_data": "FunctionDef name:is_usable_for arg:cls arg:X arg:Y arg:metric arguments arg arg arg arg If BoolOp Call Call Call Compare Return return:yes FunctionDef name:is_numpy_c_ordered arg:X arguments arg Return return:yes BoolOp Call Call FunctionDef name:is_valid_sparse_matrix arg:X arguments arg Return return:yes BoolOp Call Compare Compare Compare Assign BoolOp Call Call BoolOp Call Call BoolOp Call Call Compare Compare BoolOp Compare Call Call Return return:yes" + }, + { + "library": "cryptography", + "name": "rsa_recover_private_exponent", + "source_code": "def rsa_recover_private_exponent(e: int, p: int, q: int) -> int:\n lambda_n = (p - 1) * (q - 1) // gcd(p - 1, q - 1)\n return _modinv(e, lambda_n)", + "docstring": "Compute the RSA private_exponent (d) given the public exponent (e) and the RSA primes p and q. 
This uses the Carmichael totient function to generate the smallest possible working value of the private exponent.", + "type": "function", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py", + "ast_data": "FunctionDef name:rsa_recover_private_exponent arg:e arg:p arg:q arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_deprecate_positional_args", + "source_code": "def _deprecate_positional_args(func=None, *, version='1.3'):\n\n def _inner_deprecate_positional_args(f):\n sig = signature(f)\n kwonly_args = []\n all_args = []\n for name, param in sig.parameters.items():\n if param.kind == Parameter.POSITIONAL_OR_KEYWORD:\n all_args.append(name)\n elif param.kind == Parameter.KEYWORD_ONLY:\n kwonly_args.append(name)\n\n @wraps(f)\n def inner_f(*args, **kwargs):\n extra_args = len(args) - len(all_args)\n if extra_args <= 0:\n return f(*args, **kwargs)\n args_msg = ['{}={}'.format(name, arg) for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])]\n args_msg = ', '.join(args_msg)\n warnings.warn(f'Pass {args_msg} as keyword args. From version {version} passing these as positional arguments will result in an error', FutureWarning)\n kwargs.update(zip(sig.parameters, args))\n return f(**kwargs)\n return inner_f\n if func is not None:\n return _inner_deprecate_positional_args(func)\n return _inner_deprecate_positional_args", + "docstring": "Decorator for methods that issues warnings for positional arguments. Using the keyword-only argument syntax in pep 3102, arguments after the * will issue a warning when passed as a positional argument. Parameters ---------- func : callable, default=None Function to check arguments on. version : callable, default=\"1.3\" The version when positional arguments will result in error.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\validation.py", + "ast_data": "FunctionDef name:_deprecate_positional_args arg:func arguments arg arg FunctionDef name:_inner_deprecate_positional_args arg:f arguments arg Assign Call Assign Assign For Call If Compare Call If Compare Call FunctionDef name:inner_f arguments arg arg Assign Call Call If Compare Return return:yes Call Assign Call Call Assign Call Call Call Call Return return:yes Call Call Return return:yes If Compare Return return:yes Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_position", + "source_code": "def set_position(self, position):\n if position in ('center', 'zero'):\n pass\n else:\n if len(position) != 2:\n raise ValueError(\"position should be 'center' or 2-tuple\")\n if position[0] not in ['outward', 'axes', 'data']:\n raise ValueError(\"position[0] should be one of 'outward', 'axes', or 'data' \")\n self._position = position\n self.set_transform(self.get_spine_transform())\n if self.axis is not None:\n self.axis.reset_ticks()\n self.stale = True", + "docstring": "Set the position of the spine. Spine position is specified by a 2 tuple of (position type, amount). The position types are: * 'outward': place the spine out from the data area by the specified number of points. (Negative values place the spine inwards.) * 'axes': place the spine at the specified Axes coordinate (0 to 1). * 'data': place the spine at the specified data coordinate. 
Additionally, shorthand notations define special positions: * 'center' -> ('axes', 0.5) * 'zero' -> ('data', 0.0) Examples -------- :doc:`/gallery/spines/spine_placement_demo`", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\spines.py", + "ast_data": "FunctionDef name:set_position arg:self arg:position arguments arg arg If Compare If Compare Call Raise Call If Compare Raise Call Assign Call Call Call If Compare Call Assign" + }, + { + "library": "scipy", + "name": "transpose", + "source_code": "def transpose(self, axes=None, copy=False):\n return self.tocsr(copy=copy).transpose(axes=axes, copy=False)", + "docstring": "Reverses the dimensions of the sparse array/matrix. Parameters ---------- axes : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value. copy : bool, optional Indicates whether or not attributes of should be copied whenever possible. The degree to which attributes are copied varies depending on the type of sparse array/matrix being used. Returns ------- p : with the dimensions reversed. Notes ----- If is a or a , then this will return a or a , respectively. See Also -------- numpy.transpose : NumPy's implementation of 'transpose' for ndarrays", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_base.py", + "ast_data": "FunctionDef name:transpose arg:self arg:axes arg:copy arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "count_params", + "source_code": "def count_params(weights):\n unique_weights = {id(w): w for w in weights}.values()\n weight_shapes = [w.shape.as_list() for w in unique_weights]\n standardized_weight_shapes = [[0 if w_i is None else w_i for w_i in w] for w in weight_shapes]\n return int(sum((np.prod(p) for p in standardized_weight_shapes)))", + "docstring": "Count the total number of scalars composing the weights. 
Args: weights: An iterable containing the weights on which to compute params Returns: The total number of scalars composing the weights", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\layer_utils.py", + "ast_data": "FunctionDef name:count_params arg:weights arguments arg Assign Call Call Assign Call Assign Compare Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_set_handle_data", + "source_code": "def _set_handle_data(list_handle, element_shape, element_dtype):\n if isinstance(list_handle, ops.EagerTensor):\n if tensor_util.is_tf_type(element_shape):\n element_shape = tensor_shape.TensorShape(None)\n elif not isinstance(element_shape, tensor_shape.TensorShape):\n element_shape = tensor_shape.TensorShape(element_shape)\n handle_data = cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData()\n handle_data.is_set = True\n handle_data.shape_and_type.append(cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(shape=element_shape.as_proto(), dtype=element_dtype.as_datatype_enum, type=full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_ARRAY)))\n list_handle._handle_data = handle_data", + "docstring": "Sets type information on for consistency with graphs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\list_ops.py", + "ast_data": "FunctionDef name:_set_handle_data arg:list_handle arg:element_shape arg:element_dtype arguments arg arg arg If Call If Call Assign Call If Call Assign Call Assign Call Assign Call Call Call Call Assign" + }, + { + "library": "pandas", + "name": "insert", + "source_code": "def insert(self, loc: int, item) -> Self:\n loc = validate_insert_loc(loc, len(self))\n code = self._validate_scalar(item)\n new_vals = np.concatenate((self._ndarray[:loc], np.asarray([code], dtype=self._ndarray.dtype), self._ndarray[loc:]))\n return self._from_backing_data(new_vals)", + "docstring": "Make new ExtensionArray inserting new item at location. Follows Python list.append semantics for negative values. 
Parameters ---------- loc : int item : object Returns ------- type(self)", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\_mixins.py", + "ast_data": "FunctionDef name:insert arg:self arg:loc arg:item arguments arg arg arg Assign Call Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "endpos", + "source_code": "def endpos(self):\n return self.pos + len(self.raw)", + "docstring": "Position one past the end of the token", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\_type1font.py", + "ast_data": "FunctionDef name:endpos arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "get_visible", + "source_code": "def get_visible(self):\n return self._visible", + "docstring": "Return the visibility.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:get_visible arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_create_uninitialized_mirrored_tpu_replicated_variables", + "source_code": "def _create_uninitialized_mirrored_tpu_replicated_variables(**kwargs):\n dtype = kwargs.get('dtype', None)\n shape = kwargs.get('shape', None)\n initial_value = kwargs.get('initial_value', None)\n if initial_value is None:\n return _create_mirrored_tpu_replicated_variables(**kwargs)\n with maybe_init_scope():\n if initial_value is not None:\n if callable(initial_value):\n initial_value = initial_value()\n initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)\n kwargs['initial_value'] = initial_value\n if dtype is None:\n kwargs['dtype'] = kwargs['initial_value'].dtype\n if shape is None:\n kwargs['shape'] = kwargs['initial_value'].shape\n mirrored_replicated_var_list = []\n for replica_id in range(num_replicas):\n replicated_var_list = []\n for logic_core_id in range(num_cores_per_replica):\n with ops.device(self._tpu_devices[replica_id][logic_core_id]):\n v = uninitialized_variable_creator(**kwargs)\n replicated_var_list.append(v)\n replica_name = '{}/r:{}'.format(kwargs['name'], replica_id)\n tpu_replicated_var = tpu_replicated_variable.TPUReplicatedVariable(variables=replicated_var_list, name=replica_name)\n mirrored_replicated_var_list.append(tpu_replicated_var)\n return mirrored_replicated_var_list", + "docstring": "Returns a list of s. The list consists of s and can be used to initialize a . Each contains a list of s which are replicated to logical cores to enable XLA SPMD compilation. Args: **kwargs: the keyword arguments for creating a variable", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", + "ast_data": "FunctionDef name:_create_uninitialized_mirrored_tpu_replicated_variables arguments arg Assign Call Assign Call Assign Call If Compare Return return:yes Call With Call If Compare If Call Assign Call Assign Call Assign If Compare Assign If Compare Assign Assign For Call Assign For Call With Call Assign Call Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "score_samples", + "source_code": "@available_if(_search_estimator_has('score_samples'))\ndef score_samples(self, X):\n check_is_fitted(self)\n return self.best_estimator_.score_samples(X)", + "docstring": "Call score_samples on the estimator with the best found parameters. 
Only available if `` method.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py", + "ast_data": "FunctionDef name:score_samples arg:self arg:X arguments arg arg Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "to_proto", + "source_code": "def to_proto(self, export_scope=None):\n if export_scope is None or self.name.startswith(export_scope):\n context_def = control_flow_pb2.WhileContextDef()\n context_def.context_name = ops.strip_name_scope(self.name, export_scope)\n context_def.parallel_iterations = self._parallel_iterations\n if self._maximum_iterations is not None:\n context_def.maximum_iterations_name = ops.strip_name_scope(self._maximum_iterations.name, export_scope)\n context_def.back_prop = self._back_prop\n context_def.swap_memory = self._swap_memory\n context_def.pivot_for_pred_name = ops.strip_name_scope(self._pivot_for_pred.name, export_scope)\n context_def.pivot_for_body_name = ops.strip_name_scope(self._pivot_for_body.name, export_scope)\n context_def.pivot_name = ops.strip_name_scope(self._pivot.name, export_scope)\n context_def.loop_exit_names.extend([ops.strip_name_scope(l.name, export_scope) for l in self._loop_exits])\n context_def.loop_enter_names.extend([ops.strip_name_scope(l.name, export_scope) for l in self._loop_enters])\n context_def.values_def.MergeFrom(super(WhileContext, self)._to_values_def(export_scope=export_scope))\n for nested in self._nested_contexts:\n nested_def = context_def.nested_contexts.add()\n nested.to_control_flow_context_def(nested_def)\n return context_def\n else:\n return None", + "docstring": "Converts a to a protocol buffer. Args: export_scope: Optional . Name scope to remove. Returns: A protocol buffer.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:to_proto arg:self arg:export_scope arguments arg arg If BoolOp Compare Call Assign Call Assign Call Assign If Compare Assign Call Assign Assign Assign Call Assign Call Assign Call Call Call Call Call Call Call Call For Assign Call Call Return return:yes Return return:no" + }, + { + "library": "scrapy", + "name": "sanitize_module_name", + "source_code": "def sanitize_module_name(module_name: str) -> str:\n module_name = module_name.replace('-', '_').replace('.', '_')\n if module_name[0] not in string.ascii_letters:\n module_name = 'a' + module_name\n return module_name", + "docstring": "Sanitize the given module name, by replacing dashes and points with underscores and prefixing it with a letter if it doesn't start with one", + "type": "function", + "file_path": "scrapy\\scrapy\\commands\\genspider.py", + "ast_data": "FunctionDef name:sanitize_module_name arg:module_name arguments arg Assign Call Call If Compare Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "get_combined_dict", + "source_code": "def get_combined_dict(default_dict, additional_dict):\n d = default_dict.copy()\n d.update(additional_dict)\n return d", + "docstring": "Combines two dictionaries. This function takes two dictionaries as input and returns a new dictionary that contains all the key-value pairs from both input dictionaries. If there are any duplicate keys in the , the values from the will overwrite those in the . 
Args: default_dict (dict): The main dictionary that will be used as the base additional_dict (dict): The dictionary used to update Returns: dict: The resulting dictionary Example: >>> x = dict(a=1, b=1) >>> y = dict(b=2, c=3) >>> get_combined_dict(x, y) {'a': 1, 'b': 2, 'c': 3}", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\utils.py", + "ast_data": "FunctionDef name:get_combined_dict arg:default_dict arg:additional_dict arguments arg arg Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_mark_func_graph_as_unsaveable", + "source_code": "def _mark_func_graph_as_unsaveable(graph, learning_phase):\n if graph.building_function and is_placeholder(learning_phase):\n graph.mark_as_unsaveable('The keras learning phase placeholder was used inside a function. Exporting placeholders is not supported when saving out a SavedModel. Please call `tf.keras.backend.set_learning_phase(0)` in the function to set the learning phase to a constant value.')", + "docstring": "Mark func graph as unsaveable due to use of symbolic keras learning phase. Functions that capture the symbolic learning phase cannot be exported to SavedModel. Mark the funcgraph as unsaveable, so that an error will be raised if it is exported. Args: graph: Graph or FuncGraph object. learning_phase: Learning phase placeholder or int defined in the graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:_mark_func_graph_as_unsaveable arg:graph arg:learning_phase arguments arg arg If BoolOp Call Call" + }, + { + "library": "pytorch", + "name": "sympy_index_symbol", + "source_code": "def sympy_index_symbol(name: str) -> sympy.Symbol:\n assert name[0] != 's'\n return sympy.Symbol(name, integer=True, nonnegative=True)", + "docstring": "Used to generate an integer-nonnegative symbol.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\utils.py", + "ast_data": "FunctionDef name:sympy_index_symbol arg:name arguments arg Compare Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_create_session", + "source_code": "def _create_session(distribution_strategy):\n session_config = get_default_session_config()\n global _SESSION\n if getattr(_SESSION, 'session', None) and _SESSION.session._config:\n session_config.MergeFrom(_SESSION.session._config)\n if is_tpu_strategy(distribution_strategy):\n distribution_strategy.configure(session_config)\n master = distribution_strategy.extended._tpu_cluster_resolver.master()\n session = session_module.Session(config=session_config, target=master)\n else:\n worker_context = dc.get_current_worker_context()\n if worker_context:\n dc_session_config = worker_context.session_config\n dc_session_config.MergeFrom(session_config)\n session = session_module.Session(config=dc_session_config, target=worker_context.master_target)\n else:\n distribution_strategy.configure(session_config)\n session = session_module.Session(config=session_config)\n set_session(session)", + "docstring": "Create the Distributed Strategy session.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:_create_session arg:distribution_strategy arguments arg Assign Call If BoolOp Call Call If Call Call Assign Call Assign Call Assign Call If Assign Call Assign Call Call Assign Call Call" + }, + { + "library": "django", + "name": "wrap", + "source_code": "@keep_lazy_text\ndef wrap(text, width):\n wrapper = 
textwrap.TextWrapper(width=width, break_long_words=False, break_on_hyphens=False, replace_whitespace=False)\n result = []\n for line in text.splitlines():\n wrapped = wrapper.wrap(line)\n if not wrapped:\n result.append(line)\n else:\n result.extend(wrapped)\n if text.endswith('\\n'):\n result.append('')\n return '\\n'.join(result)", + "docstring": "A word-wrap function that preserves existing line breaks. Expects that existing line breaks are posix newlines. Preserve all white space except added line breaks consume the space on which they break the line. Don't wrap long words, thus the output text may have lines longer than ``.", + "type": "function", + "file_path": "django\\django\\utils\\text.py", + "ast_data": "FunctionDef name:wrap arg:text arg:width arguments arg arg Assign Call Assign For Call Assign Call If Call Call If Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_local_pre_state_dict_hook", + "source_code": "def _local_pre_state_dict_hook(fsdp_state: _FSDPState, module: nn.Module, *args, **kwargs) -> None:\n if _has_fsdp_params(fsdp_state, module) and (not _module_handle(fsdp_state, module).uses_sharded_strategy):\n raise RuntimeError('``local_state_dict`` can only be used when parameters are flatten and sharded.')\n _common_pre_state_dict_hook(module, fsdp_state)", + "docstring": "Hook that runs before model.state_dict() is called. Right now, pre-state_dict hook is not supported by the PyTorch core. So this API is called from to simulate the case.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py", + "ast_data": "FunctionDef name:_local_pre_state_dict_hook arg:fsdp_state arg:module arguments arg arg arg arg If BoolOp Call Call Raise Call Call" + }, + { + "library": "pytorch", + "name": "SourcelessUserDefinedObjectBuilder", + "source_code": "class SourcelessUserDefinedObjectBuilder:\n\n def __init__(self) -> None:\n raise AssertionError('Use SourcelessUserDefinedObjectBuilder.create()')\n\n @staticmethod\n def create(tx: 'InstructionTranslator', value) -> VariableTracker:\n value_type = type(value)\n if issubclass(value_type, MutableMapping):\n return MutableMappingVariable(value, mutation_type=ValueMutationNew())\n elif isinstance(value, torch.nn.Module):\n return UnspecializedNNModuleVariable(value, mutation_type=ValueMutationNew())\n else:\n return UserDefinedObjectVariable(value, mutation_type=ValueMutationNew())", + "docstring": "SourceLessBuilder does not return a UserDefinedObjectVariable, but in some cases it might be ok to return UserDefinedObjects. In such case, use this builder.", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\variables\\builder.py", + "ast_data": "ClassDef name:SourcelessUserDefinedObjectBuilder FunctionDef name:__init__ arg:self arguments arg Raise Call FunctionDef name:create arg:tx arg:value arguments arg arg Assign Call If Call Return return:yes Call Call If Call Return return:yes Call Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "check_replacements", + "source_code": "def check_replacements(self):\n applied = self.recorder.applied_migrations()\n for key, migration in self.loader.replacements.items():\n all_applied = all((m in applied for m in migration.replaces))\n if all_applied and key not in applied:\n self.recorder.record_applied(*key)", + "docstring": "Mark replacement migrations applied if their replaced set all are. 
Do this unconditionally on every migrate, rather than just when migrations are applied or unapplied, to correctly handle the case when a new squash migration is pushed to a deployment that already had all its replaced migrations applied. In this case no new migration will be applied, but the applied state of the squashed migration must be maintained.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\executor.py", + "ast_data": "FunctionDef name:check_replacements arg:self arguments arg Assign Call For Call Assign Call Compare If BoolOp Compare Call" + }, + { + "library": "django", + "name": "StrictlyBelowLookup", + "source_code": "@BaseSpatialField.register_lookup\nclass StrictlyBelowLookup(GISLookup):\n lookup_name = 'strictly_below'", + "docstring": "The 'strictly_below' operator returns true if A's bounding box is strictly below B's bounding box.", + "type": "class", + "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py", + "ast_data": "ClassDef name:StrictlyBelowLookup Assign" + }, + { + "library": "tensorflow", + "name": "export", + "source_code": "def export(self, name=None):\n with ops.name_scope(name, '%s_lookup_table_export_values' % self.name, [self.resource_handle]):\n with ops.colocate_with(self.resource_handle):\n exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(self.resource_handle, self._key_dtype, self._value_dtype)\n return (exported_keys, exported_values)", + "docstring": "Returns tensors of all keys and values in the table. Args: name: A name for the operation (optional). Returns: A pair of tensors with the first tensor containing all keys and the second tensors containing all values in the table.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:export arg:self arg:name arguments arg arg With Call With Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_fully_transformed_path", + "source_code": "def get_fully_transformed_path(self):\n self._revalidate()\n return self._transform.transform_path_affine(self._transformed_path)", + "docstring": "Return a fully-transformed copy of the child path.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:get_fully_transformed_path arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "django", + "name": "EntitiesForbidden", + "source_code": "class EntitiesForbidden(DefusedXmlException):\n\n def __init__(self, name, value, base, sysid, pubid, notation_name):\n super().__init__()\n self.name = name\n self.value = value\n self.base = base\n self.sysid = sysid\n self.pubid = pubid\n self.notation_name = notation_name\n\n def __str__(self):\n tpl = \"EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})\"\n return tpl.format(self.name, self.sysid, self.pubid)", + "docstring": "Entity definition is forbidden.", + "type": "class", + "file_path": "django\\django\\core\\serializers\\xml_serializer.py", + "ast_data": "ClassDef name:EntitiesForbidden FunctionDef name:__init__ arg:self arg:name arg:value arg:base arg:sysid arg:pubid arg:notation_name arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_cg", + "source_code": "def _cg(A, b, x0=None, tol=1e-10, maxiter=1000):\n n = b.size\n assert A.n == n\n assert A.m == n\n b_norm = 
np.linalg.norm(b)\n kvec = A.diag\n kvec = np.maximum(kvec, 1e-06)\n if x0 is None:\n x = np.zeros(n)\n else:\n x = x0\n r = b - A.dot(x)\n w = r / kvec\n p = np.zeros(n)\n beta = 0.0\n rho = np.dot(r, w)\n k = 0\n while np.sqrt(abs(rho)) > tol * b_norm and k < maxiter:\n p = w + beta * p\n z = A.dot(p)\n alpha = rho / np.dot(p, z)\n r = r - alpha * z\n w = r / kvec\n rhoold = rho\n rho = np.dot(r, w)\n x = x + alpha * p\n beta = rho / rhoold\n k += 1\n err = np.linalg.norm(A.dot(x) - b)\n return (x, err)", + "docstring": "Use Preconditioned Conjugate Gradient iteration to solve A x = b A simple Jacobi (diagonal) preconditioner is used. Parameters ---------- A : _Sparse_Matrix_coo *A* must have been compressed before by compress_csc or compress_csr method. b : array Right hand side of the linear system. x0 : array, optional Starting guess for the solution. Defaults to the zero vector. tol : float, optional Tolerance to achieve. The algorithm terminates when the relative residual is below tol. Default is 1e-10. maxiter : int, optional Maximum number of iterations. Iteration will stop after *maxiter* steps even if the specified tolerance has not been achieved. Defaults to 1000. Returns ------- x : array The converged solution. err : float The absolute error np.linalg.norm(A.dot(x) - b)", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py", + "ast_data": "FunctionDef name:_cg arg:A arg:b arg:x0 arg:tol arg:maxiter arguments arg arg arg arg arg Assign Compare Compare Assign Call Assign Assign Call If Compare Assign Call Assign Assign Call Assign Assign Call Assign Assign Call Assign While BoolOp Compare Call Call Compare Assign Assign Call Assign Call Assign Assign Assign Assign Call Assign Assign Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "remove", + "source_code": "def remove(self, keys, name=None):\n return self.erase(keys, name)", + "docstring": "Removes and its associated values from the table. If a key is not present in the table, it is silently ignored. Args: keys: Keys to remove. Can be a tensor of any shape. Must match the table's key type. name: A name for the operation (optional). Returns: The created Operation. 
Raises: TypeError: when do not match the table data types.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", + "ast_data": "FunctionDef name:remove arg:self arg:keys arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "hlescape", + "source_code": "def hlescape(s: str, latex_engine: str | None=None) -> str:\n if latex_engine in {'lualatex', 'xelatex'}:\n return s.translate(_tex_hlescape_map_without_unicode)\n else:\n return s.translate(_tex_hlescape_map)", + "docstring": "Escape text for LaTeX highlighter.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\texescape.py", + "ast_data": "FunctionDef name:hlescape arg:s arg:latex_engine arguments arg arg If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_item_by_idx", + "source_code": "def _get_item_by_idx(self, iterator, idx) -> T:\n size = len(self)\n idx = operator.index(idx)\n if not -size <= idx < size:\n raise IndexError(f'index {idx} is out of range')\n idx %= size\n return next(islice(iterator, idx, None))", + "docstring": "Get the idx-th item of the iterator.", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\container.py", + "ast_data": "FunctionDef name:_get_item_by_idx arg:self arg:iterator arg:idx arguments arg arg arg Assign Call Assign Call If Compare Raise Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "function_type", + "source_code": "@property\ndef function_type(self):\n return self._function_type", + "docstring": "Returns a FunctionType representing the Python function signature.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py", + "ast_data": "FunctionDef name:function_type arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_neg_flops", + "source_code": "@ops.RegisterStatistics('Neg', 'flops')\ndef _neg_flops(graph, node):\n return _unary_op_flops(graph, node)", + "docstring": "Compute flops for Neg operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py", + "ast_data": "FunctionDef name:_neg_flops arg:graph arg:node arguments arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "AlterModelOptions", + "source_code": "class AlterModelOptions(ModelOptionOperation):\n ALTER_OPTION_KEYS = ['base_manager_name', 'default_manager_name', 'default_related_name', 'get_latest_by', 'managed', 'ordering', 'permissions', 'default_permissions', 'select_on_save', 'verbose_name', 'verbose_name_plural']\n\n def __init__(self, name, options):\n self.options = options\n super().__init__(name)\n\n def deconstruct(self):\n kwargs = {'name': self.name, 'options': self.options}\n return (self.__class__.__qualname__, [], kwargs)\n\n def state_forwards(self, app_label, state):\n state.alter_model_options(app_label, self.name_lower, self.options, self.ALTER_OPTION_KEYS)\n\n def database_forwards(self, app_label, schema_editor, from_state, to_state):\n pass\n\n def database_backwards(self, app_label, schema_editor, from_state, to_state):\n pass\n\n def describe(self):\n return 'Change Meta options on %s' % self.name\n\n @property\n def migration_name_fragment(self):\n return 'alter_%s_options' % self.name_lower", + "docstring": "Set new model options that don't directly affect the database schema (like verbose_name, permissions, ordering). 
Python code in migrations may still need them.", + "type": "class", + "file_path": "django\\django\\db\\migrations\\operations\\models.py", + "ast_data": "ClassDef name:AlterModelOptions Assign FunctionDef name:__init__ arg:self arg:name arg:options arguments arg arg arg Assign Call Call FunctionDef name:deconstruct arg:self arguments arg Assign Return return:yes FunctionDef name:state_forwards arg:self arg:app_label arg:state arguments arg arg arg Call FunctionDef name:database_forwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg FunctionDef name:database_backwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg FunctionDef name:describe arg:self arguments arg Return return:yes FunctionDef name:migration_name_fragment arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "add", + "source_code": "def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):\n raise NotImplementedError('subclasses of BaseCache must provide an add() method')", + "docstring": "Set a value in the cache if the key does not already exist. If timeout is given, use that timeout for the key; otherwise use the default cache timeout. Return True if the value was stored, False otherwise.", + "type": "method", + "file_path": "django\\django\\core\\cache\\backends\\base.py", + "ast_data": "FunctionDef name:add arg:self arg:key arg:value arg:timeout arg:version arguments arg arg arg arg arg Raise Call" + }, + { + "library": "kornia", + "name": "_onnx_version_conversion", + "source_code": "def _onnx_version_conversion(self, op: onnx.ModelProto, target_ir_version: Optional[int]=None, target_opset_version: Optional[int]=None) -> onnx.ModelProto:\n if op.ir_version != target_ir_version or op.opset_import[0].version != target_opset_version:\n model_bytes = io.BytesIO()\n onnx.save_model(op, model_bytes)\n loaded_model = onnx.load_model_from_string(model_bytes.getvalue())\n if target_opset_version is not None:\n loaded_model = onnx.version_converter.convert_version(loaded_model, target_opset_version)\n onnx.checker.check_model(loaded_model)\n if target_ir_version is not None:\n loaded_model.ir_version = target_ir_version\n op = loaded_model\n return op", + "docstring": "Automatic conversion of the model's IR/OPSET version to the given target version. Args: op: onnx operation. target_ir_version: The target IR version to convert to. target_opset_version: The target OPSET version to convert to.", + "type": "method", + "file_path": "kornia\\kornia\\core\\mixin\\onnx.py", + "ast_data": "FunctionDef name:_onnx_version_conversion arg:self arg:op arg:target_ir_version arg:target_opset_version arguments arg arg arg arg If BoolOp Compare Compare Assign Call Call Assign Call Call If Compare Assign Call Call If Compare Assign Assign Return return:yes" + }, + { + "library": "django", + "name": "deserialize", + "source_code": "def deserialize(format, stream_or_string, **options):\n d = get_deserializer(format)\n return d(stream_or_string, **options)", + "docstring": "Deserialize a stream or a string. 
Return an iterator that yields ``.", + "type": "function", + "file_path": "django\\django\\core\\serializers\\__init__.py", + "ast_data": "FunctionDef name:deserialize arg:format arg:stream_or_string arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "forward", + "source_code": "def forward(self, *args):\n self._nav_stack.forward()\n self.set_history_buttons()\n self._update_view()", + "docstring": "Move forward in the view lim stack. For convenience of being directly connected as a GUI callback, which often get passed additional parameters, this method accepts arbitrary parameters, but does not use them.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:forward arg:self arguments arg arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "_resource_apply_sparse_duplicate_indices", + "source_code": "def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):\n summed_grad, unique_indices = _deduplicate_indexed_slices(values=grad, indices=indices)\n return self._resource_apply_sparse(summed_grad, handle, unique_indices)", + "docstring": "Add ops to apply sparse gradients to , with repeated indices. Optimizers which override this method must deal with repeated indices. See the docstring of for details. By default the correct behavior, to sum non-unique indices and their associated gradients, is enforced by first pre-processing and and passing them on to . Optimizers which deal correctly with duplicate indices may instead override this method to avoid the overhead of summing. Args: grad: a representing the gradient for the affected indices. handle: a of dtype which points to the variable to be updated. indices: a of integral type representing the indices for which the gradient is nonzero. Indices may be repeated. Returns: An which updates the value of the variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py", + "ast_data": "FunctionDef name:_resource_apply_sparse_duplicate_indices arg:self arg:grad arg:handle arg:indices arguments arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_link_flags", + "source_code": "@tf_export('sysconfig.get_link_flags')\ndef get_link_flags():\n is_mac = _platform.system() == 'Darwin'\n ver = _VERSION.split('.')[0]\n flags = []\n if not _MONOLITHIC_BUILD:\n flags.append('-L%s' % get_lib())\n if is_mac:\n flags.append('-ltensorflow_framework.%s' % ver)\n else:\n flags.append('-l:libtensorflow_framework.so.%s' % ver)\n return flags", + "docstring": "Returns the linker flags for linking with TensorFlow. The returned list of arguments can be passed to the linker for linking against TensorFlow. The result is platform dependent. 
For example, on a typical Linux system with Python 3.7 the following command prints >>> print(tf.sysconfig.get_link_flags()) Returns: A list of strings for the linker flags.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\platform\\sysconfig.py", + "ast_data": "FunctionDef name:get_link_flags arguments Assign Compare Call Assign Call Assign If Call Call If Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "flatten_with_joined_string_paths", + "source_code": "def flatten_with_joined_string_paths(structure, separator='/', expand_composites=False):\n flat_paths = yield_flat_paths(structure, expand_composites=expand_composites)\n\n def stringify_and_join(path_elements):\n return separator.join((str(path_element) for path_element in path_elements))\n flat_string_paths = (stringify_and_join(path) for path in flat_paths)\n return list(zip(flat_string_paths, flatten(structure, expand_composites=expand_composites)))", + "docstring": "Returns a list of (string path, atom) tuples. The order of tuples produced matches that of . This allows you to flatten a nested structure while keeping information about where in the structure each atom was located. See for more information. Args: structure: the nested structure to flatten. separator: string to separate levels of hierarchy in the results, defaults to '/'. expand_composites: If true, then composite tensors such as and are expanded into their component tensors. Returns: A list of (string, atom) tuples.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py", + "ast_data": "FunctionDef name:flatten_with_joined_string_paths arg:structure arg:separator arg:expand_composites arguments arg arg arg Assign Call FunctionDef name:stringify_and_join arg:path_elements arguments arg Return return:yes Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "close", + "source_code": "def close(self):\n self._fp.close()", + "docstring": "Closes the file. It is unsupported to call any other methods off this object after closing it. Note that this class supports the 'with' statement in modern versions of Python, to call this automatically", + "type": "method", + "file_path": "scipy\\scipy\\io\\_fortran.py", + "ast_data": "FunctionDef name:close arg:self arguments arg Call" + }, + { + "library": "pytorch", + "name": "visit_once", + "source_code": "def visit_once(self, thing: Any):\n idx = id(thing)\n if idx in self.visited:\n return False\n self.visited.add(idx)\n return True", + "docstring": "Return True on the first call to with thing, otherwise false", + "type": "method", + "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py", + "ast_data": "FunctionDef name:visit_once arg:self arg:thing arguments arg arg Assign Call If Compare Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "LeakyReLU", + "source_code": "class LeakyReLU(Module):\n __constants__ = ['inplace', 'negative_slope']\n inplace: bool\n negative_slope: float\n\n def __init__(self, negative_slope: float=0.01, inplace: bool=False) -> None:\n super().__init__()\n self.negative_slope = negative_slope\n self.inplace = inplace\n\n def forward(self, input: Tensor) -> Tensor:\n return F.leaky_relu(input, self.negative_slope, self.inplace)\n\n def extra_repr(self) -> str:\n inplace_str = ', inplace=True' if self.inplace else ''\n return f'negative_slope={self.negative_slope}{inplace_str}'", + "docstring": "Applies the LeakyReLU function element-wise. .. 
math:: \\text{LeakyReLU}(x) = \\max(0, x) + \\text{negative\\_slope} * \\min(0, x) or .. math:: \\text{LeakyReLU}(x) = \\begin{cases} x, & \\text{ if } x \\geq 0 \\\\ \\text{negative\\_slope} \\times x, & \\text{ otherwise } \\end{cases} Args: negative_slope: Controls the angle of the negative slope (which is used for negative input values). Default: 1e-2 inplace: can optionally do the operation in-place. Default: `(*)*(*)`, same shape as the input .. image:: ../scripts/activation_images/LeakyReLU.png Examples:: >>> m = nn.LeakyReLU(0.1) >>> input = torch.randn(2) >>> output = m(input)", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\activation.py", + "ast_data": "ClassDef name:LeakyReLU Assign FunctionDef name:__init__ arg:self arg:negative_slope arg:inplace arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes" + }, + { + "library": "scipy", + "name": "aps03_f", + "source_code": "def aps03_f(x, a, b):\n return a * x * np.exp(b * x)", + "docstring": "Rapidly changing at the root", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_tstutils.py", + "ast_data": "FunctionDef name:aps03_f arg:x arg:a arg:b arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self, model: ir.Model, exported_program: torch.export.ExportedProgram | None):\n self.model: ir.Model = model\n self.exported_program = exported_program\n self._inference_session: ort.InferenceSession | None = None\n self._tempdir: tempfile.TemporaryDirectory | None = None\n self._capture_strategy: str | None = None", + "docstring": "Initialize the ONNX program with the specified model and exported program. Args: model: The ONNX model. exported_program: The exported program that produced the ONNX model. Optional.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:model arg:exported_program arguments arg arg arg Assign" + }, + { + "library": "tensorflow", + "name": "truncated_normal", + "source_code": "def truncated_normal(self, shape, mean, stddev, dtype):\n if self.seed:\n op = stateless_random_ops.stateless_truncated_normal\n else:\n op = random_ops.truncated_normal\n return op(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)", + "docstring": "A deterministic truncated normal if seed is passed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py", + "ast_data": "FunctionDef name:truncated_normal arg:self arg:shape arg:mean arg:stddev arg:dtype arguments arg arg arg arg arg If Assign Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "floor", + "source_code": "@tf_export('math.floor', 'floor')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef floor(x, name=None):\n return gen_math_ops.floor(x, name)", + "docstring": "Returns element-wise largest integer not greater than x. Both input range is and the output range consists of all integer values. For example: >>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float(\"inf\")]) >>> tf.floor(x).numpy() array([ 1., -2., 5., -3., 0., inf], dtype=float32) Args: x: A . Must be one of the following types: , , , . name: A name for the operation (optional). Returns: A . 
Has the same type as x.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:floor arg:x arg:name arguments arg arg Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "identity_matrix", + "source_code": "def identity_matrix(self, input: Tensor) -> Tensor:\n return kornia.eye_like(4, input)", + "docstring": "Return 4x4 identity matrix.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\_3d\\base.py", + "ast_data": "FunctionDef name:identity_matrix arg:self arg:input arguments arg arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "is_suppressed_warning", + "source_code": "def is_suppressed_warning(warning_type: str, sub_type: str, suppress_warnings: Set[str] | Sequence[str]) -> bool:\n if warning_type is None or len(suppress_warnings) == 0:\n return False\n suppressed_warnings = frozenset(suppress_warnings)\n if warning_type in suppressed_warnings:\n return True\n if f'{warning_type}.*' in suppressed_warnings:\n return True\n return f'{warning_type}.{sub_type}' in suppressed_warnings", + "docstring": "Check whether the warning is suppressed or not.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\logging.py", + "ast_data": "FunctionDef name:is_suppressed_warning arg:warning_type arg:sub_type arg:suppress_warnings arguments arg arg arg If BoolOp Compare Compare Call Return return:yes Assign Call If Compare Return return:yes If Compare Return return:yes Return return:yes Compare" + }, + { + "library": "scipy", + "name": "Schwefel06", + "source_code": "class Schwefel06(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n self.custom_bounds = ([-10.0, 10.0], [-10.0, 10.0])\n self.global_optimum = [[1.0, 3.0]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n return max(abs(x[0] + 2 * x[1] - 7), abs(2 * x[0] + x[1] - 5))", + "docstring": "Schwefel 6 objective function. This class defines the Schwefel 6 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{Schwefel06}}(x) = \\max(\\lvert x_1 + 2x_2 - 7 \\rvert, \\lvert 2x_1 + x_2 - 5 \\rvert) with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", + "ast_data": "ClassDef name:Schwefel06 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "should_save_traces", + "source_code": "def should_save_traces():\n return _save_options_context.save_traces", + "docstring": "Whether to trace layer functions-can be disabled in the save_traces arg.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\utils.py", + "ast_data": "FunctionDef name:should_save_traces arguments Return return:yes" + }, + { + "library": "pandas", + "name": "_freeze", + "source_code": "def _freeze(self) -> None:\n object.__setattr__(self, '__frozen', True)", + "docstring": "Prevents setting additional attributes.", + "type": "method", + "file_path": "pandas\\pandas\\core\\base.py", + "ast_data": "FunctionDef name:_freeze arg:self arguments arg Call" + }, + { + "library": "pandas", + "name": "_index_to_interp_indices", + "source_code": "def _index_to_interp_indices(index: Index, method: str) -> np.ndarray:\n xarr = index._values\n if needs_i8_conversion(xarr.dtype):\n xarr = xarr.view('i8')\n if method == 'linear':\n inds = xarr\n inds = cast(np.ndarray, inds)\n else:\n inds = np.asarray(xarr)\n if method in ('values', 'index'):\n if inds.dtype == np.object_:\n inds = lib.maybe_convert_objects(inds)\n return inds", + "docstring": "Convert Index to ndarray of indices to pass to NumPy/SciPy.", + "type": "function", + "file_path": "pandas\\pandas\\core\\missing.py", + "ast_data": "FunctionDef name:_index_to_interp_indices arg:index arg:method arguments arg arg Assign If Call Assign Call If Compare Assign Assign Call Assign Call If Compare If Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_sparse_tensors", + "source_code": "def get_sparse_tensors(self, transformation_cache, state_manager):\n return CategoricalColumn.IdWeightPair(transformation_cache.get(self, state_manager), None)", + "docstring": "See base class.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:get_sparse_tensors arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_DefaultGraphStack", + "source_code": "class _DefaultGraphStack(stack.DefaultStack[Graph]):\n\n def __init__(self) -> None:\n super(_DefaultGraphStack, self).__init__()\n self._global_default_graph = None\n\n def get_default(self) -> Graph:\n if self.stack:\n return self.stack[-1]\n elif self._global_default_graph:\n return self._global_default_graph\n else:\n self._global_default_graph = Graph()\n return self._global_default_graph\n\n def _GetGlobalDefaultGraph(self) -> Graph:\n if self._global_default_graph is None:\n self._global_default_graph = Graph()\n return self._global_default_graph\n\n def reset(self) -> None:\n super(_DefaultGraphStack, self).reset()\n self._global_default_graph = None\n\n @tf_contextlib.contextmanager\n def get_controller(self, default) -> Iterator[Graph]:\n context.context().context_switches.push(default.building_function, default.as_default, 
default._device_function_stack)\n try:\n with super(_DefaultGraphStack, self).get_controller(default) as g, context.graph_mode():\n yield g\n finally:\n context.context().context_switches.pop()", + "docstring": "A thread-local stack of objects for providing an implicit default graph.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "ClassDef name:_DefaultGraphStack FunctionDef name:__init__ arg:self arguments arg Call Call Assign FunctionDef name:get_default arg:self arguments arg If Return return:yes If Return return:yes Assign Call Return return:yes FunctionDef name:_GetGlobalDefaultGraph arg:self arguments arg If Compare Assign Call Return return:yes FunctionDef name:reset arg:self arguments arg Call Call Assign FunctionDef name:get_controller arg:self arg:default arguments arg arg Call Call Try With Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "parse_stmts", + "source_code": "def parse_stmts(stmts: str) -> tuple[str, str]:\n stmts = textwrap.dedent(stmts).strip()\n lines: list[str] = stmts.splitlines(keepends=False)\n assert len(lines) >= 3, f'Invalid string:\\n{stmts}'\n column_header_pattern = '^Python\\\\s{35}\\\\| C\\\\+\\\\+(\\\\s*)$'\n signature_pattern = '^: f\\\\((.*)\\\\)( -> (.+))?\\\\s*$'\n separation_pattern = '^[-]{40} | [-]{40}$'\n code_pattern = '^(.{40}) \\\\|($| (.*)$)'\n column_match = re.search(column_header_pattern, lines[0])\n if column_match is None:\n raise ValueError(f'Column header `{lines[0]}` does not match pattern `{column_header_pattern}`')\n assert re.search(separation_pattern, lines[1])\n py_lines: list[str] = []\n cpp_lines: list[str] = []\n for l in lines[2:]:\n l_match = re.search(code_pattern, l)\n if l_match is None:\n raise ValueError(f'Invalid line `{l}`')\n py_lines.append(l_match.groups()[0])\n cpp_lines.append(l_match.groups()[2] or '')\n l_from_stmts = f'{py_lines[-1]:<40} | {cpp_lines[-1]:<40}'.rstrip()\n assert l_from_stmts == l.rstrip(), f'Failed to round trip `{l}`'\n return ('\\n'.join(py_lines), '\\n'.join(cpp_lines))", + "docstring": "Helper function for side-by-side Python and C++ stmts. For more complex statements, it can be useful to see Python and C++ code side by side. To this end, we provide an **extremely restricted** way to define Python and C++ code side-by-side. The schema should be mostly self explanatory, with the following non-obvious caveats: - Width for the left (Python) column MUST be 40 characters. - The column separator is \" | \", not \"|\". Whitespace matters.", + "type": "function", + "file_path": "pytorch\\benchmarks\\instruction_counts\\core\\utils.py", + "ast_data": "FunctionDef name:parse_stmts arg:stmts arguments arg Assign Call Call Call Compare Call Assign Assign Assign Assign Assign Call If Compare Raise Call Call For Assign Call If Compare Raise Call Call Call Call BoolOp Call Assign Call Compare Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "db_parameters", + "source_code": "def db_parameters(self, connection):\n type_string = self.db_type(connection)\n check_string = self.db_check(connection)\n return {'type': type_string, 'check': check_string}", + "docstring": "Extension of db_type(), providing a range of different return values (type, checks). 
This will look at db_type(), allowing custom model fields to override it.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\__init__.py", + "ast_data": "FunctionDef name:db_parameters arg:self arg:connection arguments arg arg Assign Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "statically_known_power_of_2", + "source_code": "def statically_known_power_of_2(self, expr: Expr) -> bool:\n return isinstance(expr, sympy.Integer) and is_power_of_2(int(expr))", + "docstring": "Returns a bool indicating if x is known to be a power of 2.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\sizevars.py", + "ast_data": "FunctionDef name:statically_known_power_of_2 arg:self arg:expr arguments arg arg Return return:yes BoolOp Call Call Call" + }, + { + "library": "tensorflow", + "name": "_AcoshGrad", + "source_code": "@ops.RegisterGradient('Acosh')\ndef _AcoshGrad(op: ops.Operation, grad):\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n y = math_ops.conj(y)\n return grad / math_ops.sinh(y)", + "docstring": "Returns grad * 1/sinh(y).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py", + "ast_data": "FunctionDef name:_AcoshGrad arg:op arg:grad arguments arg arg Assign With Call Assign Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "copy", + "source_code": "def copy(self):\n return copy.copy(self)", + "docstring": "Return a shallow copy of the , which will share the vertices and codes with the source .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\path.py", + "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, channel):\n self.SendEvents = channel.stream_stream('/tensorflow.EventListener/SendEvents', request_serializer=tensorflow_dot_core_dot_util_dot_event__pb2.Event.SerializeToString, response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString)\n self.SendTracebacks = channel.unary_unary('/tensorflow.EventListener/SendTracebacks', request_serializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.CallTraceback.SerializeToString, response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString)\n self.SendSourceFiles = channel.unary_unary('/tensorflow.EventListener/SendSourceFiles', request_serializer=tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DebuggedSourceFiles.SerializeToString, response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString)", + "docstring": "Constructor. 
Args: channel: A grpc.Channel.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_service_pb2_grpc.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:channel arguments arg arg Assign Call Assign Call Assign Call" + }, + { + "library": "pandas", + "name": "_should_reindex_frame_op", + "source_code": "def _should_reindex_frame_op(self, right, op, axis: int, fill_value, level) -> bool:\n if op is operator.pow or op is roperator.rpow:\n return False\n if not isinstance(right, DataFrame):\n return False\n if (isinstance(self.columns, MultiIndex) or isinstance(right.columns, MultiIndex)) and (not self.columns.equals(right.columns)) and (fill_value is None):\n return True\n if fill_value is None and level is None and (axis == 1):\n left_uniques = self.columns.unique()\n right_uniques = right.columns.unique()\n cols = left_uniques.intersection(right_uniques)\n if len(cols) and (not (len(cols) == len(left_uniques) and len(cols) == len(right_uniques))):\n return True\n return False", + "docstring": "Check if this is an operation between DataFrames that will need to reindex.", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:_should_reindex_frame_op arg:self arg:right arg:op arg:axis arg:fill_value arg:level arguments arg arg arg arg arg arg If BoolOp Compare Compare Return return:yes If Call Return return:yes If BoolOp BoolOp Call Call Call Compare Return return:yes If BoolOp Compare Compare Compare Assign Call Assign Call Assign Call If BoolOp Call BoolOp Compare Call Call Compare Call Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_axislabel_direction", + "source_code": "def set_axislabel_direction(self, label_direction):\n self._axislabel_add_angle = _api.check_getitem({'+': 0, '-': 180}, label_direction=label_direction)", + "docstring": "Adjust the direction of the axis label. Note that the *label_direction*\\s '+' and '-' are relative to the direction of the increasing coordinate. 
Parameters ---------- label_direction : {\"+\", \"-\"}", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py", + "ast_data": "FunctionDef name:set_axislabel_direction arg:self arg:label_direction arguments arg arg Assign Call" + }, + { + "library": "django", + "name": "_is_relevant_relation", + "source_code": "def _is_relevant_relation(relation, altered_field):\n field = relation.field\n if field.many_to_many:\n return False\n if altered_field.primary_key and field.to_fields == [None]:\n return True\n return altered_field.name in field.to_fields", + "docstring": "When altering the given field, must constraints on its model from the given relation be temporarily dropped?", + "type": "function", + "file_path": "django\\django\\db\\backends\\base\\schema.py", + "ast_data": "FunctionDef name:_is_relevant_relation arg:relation arg:altered_field arguments arg arg Assign If Return return:yes If BoolOp Compare Return return:yes Return return:yes Compare" + }, + { + "library": "cherrypy", + "name": "set_response_cookie", + "source_code": "def set_response_cookie(path=None, path_header=None, name='session_id', timeout=60, domain=None, secure=False, httponly=False):\n cookie = cherrypy.serving.response.cookie\n cookie[name] = cherrypy.serving.session.id\n cookie[name]['path'] = path or cherrypy.serving.request.headers.get(path_header) or '/'\n if timeout:\n cookie[name]['max-age'] = timeout * 60\n _add_MSIE_max_age_workaround(cookie[name], timeout)\n if domain is not None:\n cookie[name]['domain'] = domain\n if secure:\n cookie[name]['secure'] = 1\n if httponly:\n if not cookie[name].isReservedKey('httponly'):\n raise ValueError('The httponly cookie token is not supported.')\n cookie[name]['httponly'] = 1", + "docstring": "Set a response cookie for the client. path the 'path' value to stick in the response cookie metadata. path_header if 'path' is None (the default), then the response cookie 'path' will be pulled from request.headers[path_header]. name the name of the cookie. timeout the expiration timeout for the cookie. If 0 or other boolean False, no 'expires' param will be set, and the cookie will be a \"session cookie\" which expires when the browser is closed. domain the cookie domain. secure if False (the default) the cookie 'secure' value will not be set. If True, the cookie 'secure' value will be set (to 1). httponly If False (the default) the cookie 'httponly' value will not be set. If True, the cookie 'httponly' value will be set (to 1).", + "type": "function", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:set_response_cookie arg:path arg:path_header arg:name arg:timeout arg:domain arg:secure arg:httponly arguments arg arg arg arg arg arg arg Assign Assign Assign BoolOp Call If Assign Call If Compare Assign If Assign If If Call Raise Call Assign" + }, + { + "library": "pandas", + "name": "maybe_convert_usecols", + "source_code": "def maybe_convert_usecols(usecols: str | list[int] | list[str] | usecols_func | None) -> None | list[int] | list[str] | usecols_func:\n if usecols is None:\n return usecols\n if is_integer(usecols):\n raise ValueError('Passing an integer for `usecols` is no longer supported. Please pass in a list of int from 0 to `usecols` inclusive instead.')\n if isinstance(usecols, str):\n return _range2cols(usecols)\n return usecols", + "docstring": "Convert into a compatible format for parsing in . Parameters ---------- usecols : object The use-columns object to potentially convert. 
Returns ------- converted : object The compatible format of .", + "type": "function", + "file_path": "pandas\\pandas\\io\\excel\\_util.py", + "ast_data": "FunctionDef name:maybe_convert_usecols arg:usecols arguments arg If Compare Return return:yes If Call Raise Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_subplotspec", + "source_code": "def get_subplotspec(self):\n return self._subplotspec", + "docstring": "Get the SubplotSpec instance.", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py", + "ast_data": "FunctionDef name:get_subplotspec arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_call_batch_hook_helper", + "source_code": "def _call_batch_hook_helper(self, hook_name, batch, logs):\n if self._check_timing:\n start_time = time.time()\n logs = self._process_logs(logs, is_batch_hook=True)\n for callback in self.callbacks:\n hook = getattr(callback, hook_name)\n hook(batch, logs)\n if self._check_timing:\n if hook_name not in self._hook_times:\n self._hook_times[hook_name] = []\n self._hook_times[hook_name].append(time.time() - start_time)", + "docstring": "Helper function for methods.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", + "ast_data": "FunctionDef name:_call_batch_hook_helper arg:self arg:hook_name arg:batch arg:logs arguments arg arg arg arg If Assign Call Assign Call For Assign Call Call If If Compare Assign Call Call" + }, + { + "library": "kornia", + "name": "nms", + "source_code": "def nms(signal: Tensor, window_size: int=5, cutoff: float=0.0) -> Tensor:\n if window_size % 2 != 1:\n raise ValueError(f'window_size has to be odd, got {window_size}')\n _, ixs = F.max_pool2d(signal, kernel_size=window_size, stride=1, padding=window_size // 2, return_indices=True)\n h, w = signal.shape[1:]\n coords = torch.arange(h * w, device=signal.device).reshape(1, h, w)\n nms = ixs == coords\n if cutoff is None:\n return nms\n else:\n return nms & (signal > cutoff)", + "docstring": "Apply non-maximum suppression.", + "type": "function", + "file_path": "kornia\\kornia\\feature\\disk\\detector.py", + "ast_data": "FunctionDef name:nms arg:signal arg:window_size arg:cutoff arguments arg arg arg If Compare Raise Call Assign Call Assign Assign Call Call Assign Compare If Compare Return return:yes Return return:yes Compare" + }, + { + "library": "matplotlib", + "name": "IdentityTransform", + "source_code": "class IdentityTransform(Affine2DBase):\n _mtx = np.identity(3)\n\n def frozen(self):\n return self\n __str__ = _make_str_method()\n\n def get_matrix(self):\n return self._mtx\n\n def transform(self, values):\n return np.asanyarray(values)\n\n def transform_affine(self, values):\n return np.asanyarray(values)\n\n def transform_non_affine(self, values):\n return np.asanyarray(values)\n\n def transform_path(self, path):\n return path\n\n def transform_path_affine(self, path):\n return path\n\n def transform_path_non_affine(self, path):\n return path\n\n def get_affine(self):\n return self\n\n def inverted(self):\n return self", + "docstring": "A special class that does one thing, the identity transform, in a fast way.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "ClassDef name:IdentityTransform Assign Call FunctionDef name:frozen arg:self arguments arg Return return:yes Assign Call FunctionDef name:get_matrix arg:self arguments arg Return return:yes 
FunctionDef name:transform arg:self arg:values arguments arg arg Return return:yes Call FunctionDef name:transform_affine arg:self arg:values arguments arg arg Return return:yes Call FunctionDef name:transform_non_affine arg:self arg:values arguments arg arg Return return:yes Call FunctionDef name:transform_path arg:self arg:path arguments arg arg Return return:yes FunctionDef name:transform_path_affine arg:self arg:path arguments arg arg Return return:yes FunctionDef name:transform_path_non_affine arg:self arg:path arguments arg arg Return return:yes FunctionDef name:get_affine arg:self arguments arg Return return:yes FunctionDef name:inverted arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "nunique", + "source_code": "@final\ndef nunique(self, dropna: bool=True) -> int:\n uniqs = self.unique()\n if dropna:\n uniqs = remove_na_arraylike(uniqs)\n return len(uniqs)", + "docstring": "Return number of unique elements in the object. Excludes NA values by default. Parameters ---------- dropna : bool, default True Don't include NaN in the count. Returns ------- int A integer indicating the number of unique elements in the object. See Also -------- DataFrame.nunique: Method nunique for DataFrame. Series.count: Count non-NA/null observations in the Series. Examples -------- >>> s = pd.Series([1, 3, 5, 7, 7]) >>> s 0 1 1 3 2 5 3 7 4 7 dtype: int64 >>> s.nunique() 4", + "type": "method", + "file_path": "pandas\\pandas\\core\\base.py", + "ast_data": "FunctionDef name:nunique arg:self arg:dropna arguments arg arg Assign Call If Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "_clone", + "source_code": "def _clone(self):\n c = self.__class__(self.raw_query, model=self.model, query=self.query, params=self.params, translations=self.translations, using=self._db, hints=self._hints)\n c._prefetch_related_lookups = self._prefetch_related_lookups[:]\n return c", + "docstring": "Same as QuerySet._clone()", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:_clone arg:self arguments arg Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "max", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef max(x, axis=None, keepdims=False):\n return math_ops.reduce_max(x, axis, keepdims)", + "docstring": "Maximum value in a tensor. Args: x: A tensor or variable. axis: An integer, the axis to find maximum values. keepdims: A boolean, whether to keep the dimensions or not. If is , the rank of the tensor is reduced by 1. If is , the reduced dimension is retained with length 1. Returns: A tensor with maximum values of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:max arg:x arg:axis arg:keepdims arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "Chi2", + "source_code": "class Chi2(Gamma):\n arg_constraints = {'df': constraints.positive}\n\n def __init__(self, df: Union[Tensor, float], validate_args: Optional[bool]=None) -> None:\n super().__init__(0.5 * df, 0.5, validate_args=validate_args)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(Chi2, _instance)\n return super().expand(batch_shape, new)\n\n @property\n def df(self) -> Tensor:\n return self.concentration * 2", + "docstring": "Creates a Chi-squared distribution parameterized by shape parameter :attr:. 
This is exactly equivalent to ``Gamma(alpha=0.5*df, beta=0.5)`` Example:: >>> # xdoctest: +IGNORE_WANT(\"non-deterministic\") >>> m = Chi2(torch.tensor([1.0])) >>> m.sample() # Chi2 distributed with shape df=1 tensor([ 0.1046]) Args: df (float or Tensor): shape parameter of the distribution", + "type": "class", + "file_path": "pytorch\\torch\\distributions\\chi2.py", + "ast_data": "ClassDef name:Chi2 Assign FunctionDef name:__init__ arg:self arg:df arg:validate_args arguments arg arg arg Call Call FunctionDef name:expand arg:self arg:batch_shape arg:_instance arguments arg arg arg Assign Call Return return:yes Call Call FunctionDef name:df arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "default_decompositions", + "source_code": "def default_decompositions() -> 'CustomDecompTable':\n return CustomDecompTable()", + "docstring": "This is the default decomposition table which contains decomposition of all ATEN operators to core aten opset. Use this API together with :func:", + "type": "function", + "file_path": "pytorch\\torch\\export\\exported_program.py", + "ast_data": "FunctionDef name:default_decompositions arguments Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_num_combinations", + "source_code": "@staticmethod\ndef _num_combinations(n_features, min_degree, max_degree, interaction_only, include_bias):\n if interaction_only:\n combinations = sum([comb(n_features, i, exact=True) for i in range(max(1, min_degree), min(max_degree, n_features) + 1)])\n else:\n combinations = comb(n_features + max_degree, max_degree, exact=True) - 1\n if min_degree > 0:\n d = min_degree - 1\n combinations -= comb(n_features + d, d, exact=True) - 1\n if include_bias:\n combinations += 1\n return combinations", + "docstring": "Calculate number of terms in polynomial expansion This should be equivalent to counting the number of terms returned by _combinations(...) 
but much faster.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_polynomial.py", + "ast_data": "FunctionDef name:_num_combinations arg:n_features arg:min_degree arg:max_degree arg:interaction_only arg:include_bias arguments arg arg arg arg arg If Assign Call Call Call Call Call Assign Call If Compare Assign Call If Return return:yes" + }, + { + "library": "scrapy", + "name": "css", + "source_code": "def css(self, *a: Any, **kw: Any) -> SelectorList:\n raise NotSupported(\"Response content isn't text\")", + "docstring": "Shortcut method implemented only by responses whose content is text (subclasses of TextResponse).", + "type": "method", + "file_path": "scrapy\\scrapy\\http\\response\\__init__.py", + "ast_data": "FunctionDef name:css arg:self arguments arg arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "_sharded_op_impl", + "source_code": "def _sharded_op_impl(func):\n return functools.partial(_decorator_func, op=func, op_table=_SHARDED_OPS)", + "docstring": "Decorator to register a default sharded op.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py", + "ast_data": "FunctionDef name:_sharded_op_impl arg:func arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_export_to_saved_model_graph", + "source_code": "def _export_to_saved_model_graph(self, object_map, tensor_map, **unused_kwargs):\n new_obj = copy.copy(self)\n with ops.device(self._resource_device):\n new_resource = new_obj._create_resource()\n new_obj._resource_handle = new_resource\n object_map[self] = new_obj\n tensor_map[self.resource_handle] = new_resource\n return [self.resource_handle]", + "docstring": "For implementing .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\resource.py", + "ast_data": "FunctionDef name:_export_to_saved_model_graph arg:self arg:object_map arg:tensor_map arguments arg arg arg arg Assign Call With Call Assign Call Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "should_trigger_for_step", + "source_code": "def should_trigger_for_step(self, step):\n raise NotImplementedError", + "docstring": "Return true if the timer should trigger for the specified step.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py", + "ast_data": "FunctionDef name:should_trigger_for_step arg:self arg:step arguments arg arg Raise" + }, + { + "library": "matplotlib", + "name": "get_siblings", + "source_code": "def get_siblings(self, a):\n return self._grouper.get_siblings(a)", + "docstring": "Return all of the items joined with *a*, including itself.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:get_siblings arg:self arg:a arguments arg arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "is_numpy_namespace", + "source_code": "@lru_cache(100)\ndef is_numpy_namespace(xp: Namespace) -> bool:\n return xp.__name__ in {'numpy', _compat_module_name() + '.numpy'}", + "docstring": "Returns True if is a NumPy namespace. This includes both NumPy itself and the version wrapped by array-api-compat. 
See Also -------- array_namespace is_cupy_namespace is_torch_namespace is_ndonnx_namespace is_dask_namespace is_jax_namespace is_pydata_sparse_namespace is_array_api_strict_namespace", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py", + "ast_data": "FunctionDef name:is_numpy_namespace arg:xp arguments arg Return return:yes Compare Call Call" + }, + { + "library": "tensorflow", + "name": "function_scope_id", + "source_code": "@property\ndef function_scope_id(self):\n return id(self._context_handle)", + "docstring": "Returns an id that is unique to each scope holding functions.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:function_scope_id arg:self arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "split", + "source_code": "def split(self, X, y, groups=None):\n y = check_array(y, input_name='y', ensure_2d=False, dtype=None)\n return super().split(X, y, groups=groups)", + "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. Note that providing `random_state` to an integer.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", + "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_TriangularSolve", + "source_code": "def _TriangularSolve(x, r):\n return _linalg.adjoint(linalg_ops.matrix_triangular_solve(r, _linalg.adjoint(x), lower=False, adjoint=False))", + "docstring": "Equiv to matmul(x, adjoint(matrix_inverse(r))) if r is upper-tri.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py", + "ast_data": "FunctionDef name:_TriangularSolve arg:x arg:r arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "set_extra_resource_collection", + "source_code": "def set_extra_resource_collection(self, val) -> None:\n self.extra_resources_collection = val\n if self.extra_resources_collection:\n self.get_resources_dir(can_create=True)\n return", + "docstring": "Collects extra resources such as generated kernels, index tensor data, and any other metadata that is required to complete the Execution Trace content. The caller should call this method with val=True after calling register_callback() if they want to collect the extra resources.", + "type": "method", + "file_path": "pytorch\\torch\\profiler\\profiler.py", + "ast_data": "FunctionDef name:set_extra_resource_collection arg:self arg:val arguments arg arg Assign If Call Return return:no" + }, + { + "library": "matplotlib", + "name": "set_clip_on", + "source_code": "def set_clip_on(self, b):\n self._clipon = b\n self.pchanged()\n self.stale = True", + "docstring": "Set whether the artist uses clipping. When False, artists will be visible outside the Axes which can lead to unexpected results. 
Parameters ---------- b : bool", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\artist.py", + "ast_data": "FunctionDef name:set_clip_on arg:self arg:b arguments arg arg Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "DoNotConvert", + "source_code": "class DoNotConvert(Rule):\n\n def __str__(self):\n return 'DoNotConvert rule for {}'.format(self._prefix)\n\n def get_action(self, module):\n if self.matches(module.__name__):\n return Action.DO_NOT_CONVERT\n return Action.NONE", + "docstring": "Indicates that this module should be not converted.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\config_lib.py", + "ast_data": "ClassDef name:DoNotConvert FunctionDef name:__str__ arg:self arguments arg Return return:yes Call FunctionDef name:get_action arg:self arg:module arguments arg arg If Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "rank", + "source_code": "@dispatch.dispatch_for_types(array_ops.rank, StructuredTensor)\ndef rank(input, name=None):\n with ops.name_scope(name, 'rank', [input]) as name:\n return constant_op.constant(input.rank, dtype=dtypes.int32)", + "docstring": "Returns the rank of a tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", + "ast_data": "FunctionDef name:rank arg:input arg:name arguments arg arg With Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "codegen", + "source_code": "def codegen(self, code: IndentedBuffer) -> None:\n pass", + "docstring": "Second pass to output code", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py", + "ast_data": "FunctionDef name:codegen arg:self arg:code arguments arg arg" + }, + { + "library": "pytorch", + "name": "_PythonMsgPrinter", + "source_code": "class _PythonMsgPrinter(PythonPrinter):\n\n def __init__(self, src_map: dict[str, list[str]]) -> None:\n super().__init__()\n self.src_map = src_map\n\n def _print_Symbol(self, sym: sympy.Symbol) -> str:\n return self.src_map[sym.name][0]", + "docstring": "Util printer that replaces sympy symbols with their source-level names and renders sympy relational operators (e.g., Eq, Ne, Ge, Le) inline (i.e., as ==, !=, >, <).", + "type": "class", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "ClassDef name:_PythonMsgPrinter FunctionDef name:__init__ arg:self arg:src_map arguments arg arg Call Call Assign FunctionDef name:_print_Symbol arg:self arg:sym arguments arg arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "axhspan", + "source_code": "@_docstring.interpd\ndef axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):\n self._check_no_units([xmin, xmax], ['xmin', 'xmax'])\n (ymin, ymax), = self._process_unit_info([('y', [ymin, ymax])], kwargs)\n p = mpatches.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, **kwargs)\n p.set_transform(self.get_yaxis_transform(which='grid'))\n ix = self.dataLim.intervalx.copy()\n mx = self.dataLim.minposx\n self.add_patch(p)\n self.dataLim.intervalx = ix\n self.dataLim.minposx = mx\n p.get_path()._interpolation_steps = mpl.axis.GRIDLINE_INTERPOLATION_STEPS\n self._request_autoscale_view('y')\n return p", + "docstring": "Add a horizontal span (rectangle) across the Axes. The rectangle spans from *ymin* to *ymax* vertically, and, by default, the whole x-axis horizontally. 
The x-span can be set using *xmin* (default: 0) and *xmax* (default: 1) which are in axis units; e.g. `~.Axes.set_xlim~matplotlib.patches.Rectangle~matplotlib.patches.Rectangle` properties %(Rectangle:kwdoc)s See Also -------- axvspan : Add a vertical span across the Axes.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", + "ast_data": "FunctionDef name:axhspan arg:self arg:ymin arg:ymax arg:xmin arg:xmax arguments arg arg arg arg arg arg Call Assign Call Assign Call Call Call Assign Call Assign Call Assign Assign Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_extend_before", + "source_code": "def _extend_before(self, other):\n other_num_lines = other.num_lines()\n self._lines = other.lines + self._lines\n new_font_attr_segs = {}\n for line_index in self.font_attr_segs:\n new_font_attr_segs[other_num_lines + line_index] = self.font_attr_segs[line_index]\n new_font_attr_segs.update(other.font_attr_segs)\n self._font_attr_segs = new_font_attr_segs\n new_annotations = {}\n for key in self._annotations:\n if isinstance(key, int):\n new_annotations[other_num_lines + key] = self.annotations[key]\n else:\n new_annotations[key] = other.annotations[key]\n new_annotations.update(other.annotations)\n self._annotations = new_annotations", + "docstring": "Add another RichTextLines object to the front. Args: other: (RichTextLines) The other object to add to the front to this object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", + "ast_data": "FunctionDef name:_extend_before arg:self arg:other arguments arg arg Assign Call Assign Assign For Assign Call Assign Assign For If Call Assign Assign Call Assign" + }, + { + "library": "numpy", + "name": "_call_as_normal", + "source_code": "def _call_as_normal(self, *args, **kwargs):\n excluded = self.excluded\n if not kwargs and (not excluded):\n func = self.pyfunc\n vargs = args\n else:\n nargs = len(args)\n names = [_n for _n in kwargs if _n not in excluded]\n inds = [_i for _i in range(nargs) if _i not in excluded]\n the_args = list(args)\n\n def func(*vargs):\n for _n, _i in enumerate(inds):\n the_args[_i] = vargs[_n]\n kwargs.update(zip(names, vargs[len(inds):]))\n return self.pyfunc(*the_args, **kwargs)\n vargs = [args[_i] for _i in inds]\n vargs.extend([kwargs[_n] for _n in names])\n return self._vectorize_call(func=func, args=vargs)", + "docstring": "Return arrays with the results of broadcast (vectorized) over and not in .", + "type": "method", + "file_path": "numpy\\numpy\\lib\\_function_base_impl.py", + "ast_data": "FunctionDef name:_call_as_normal arg:self arguments arg arg arg Assign If BoolOp Assign Assign Assign Call Assign Compare Assign Call Compare Assign Call FunctionDef name:func arguments arg For Call Assign Call Call Call Return return:yes Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_IsOpFree", + "source_code": "def _IsOpFree(op):\n if op.control_inputs:\n return False\n if op.graph._is_function(op.type) or op.type == 'SymbolicGradient':\n return True\n for x in op.inputs:\n if not util.IsLoopConstantEnter(x.op):\n return False\n return True", + "docstring": "Determines if needs a control dependency.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:_IsOpFree arg:op arguments arg If Return return:yes If BoolOp Call Compare Return return:yes For If Call Return return:yes Return return:yes" + 
}, + { + "library": "django", + "name": "try_transform", + "source_code": "def try_transform(self, lhs, name, lookups=None):\n transform_class = lhs.get_transform(name)\n if transform_class:\n return transform_class(lhs)\n else:\n output_field = lhs.output_field.__class__\n suggested_lookups = difflib.get_close_matches(name, lhs.output_field.get_lookups())\n if suggested_lookups:\n suggestion = ', perhaps you meant %s?' % ' or '.join(suggested_lookups)\n else:\n suggestion = '.'\n if lookups is not None:\n name_index = lookups.index(name)\n unsupported_lookup = LOOKUP_SEP.join(lookups[name_index:])\n else:\n unsupported_lookup = name\n raise FieldError(\"Unsupported lookup '%s' for %s or join on the field not permitted%s\" % (unsupported_lookup, output_field.__name__, suggestion))", + "docstring": "Helper method for build_lookup(). Try to fetch and initialize a transform for name parameter from lhs.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\query.py", + "ast_data": "FunctionDef name:try_transform arg:self arg:lhs arg:name arg:lookups arguments arg arg arg arg Assign Call If Return return:yes Call Assign Assign Call Call If Assign Call Assign If Compare Assign Call Assign Call Assign Raise Call" + }, + { + "library": "django", + "name": "linear_name", + "source_code": "@property\ndef linear_name(self):\n units, name = capi.linear_units(self.ptr, byref(c_char_p()))\n return name", + "docstring": "Return the name of the linear units.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py", + "ast_data": "FunctionDef name:linear_name arg:self arguments arg Assign Call Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_parsed_string_to_bounds", + "source_code": "def _parsed_string_to_bounds(self, reso: Resolution, parsed: dt.datetime) -> tuple[Timestamp, Timestamp]:\n freq = OFFSET_TO_PERIOD_FREQSTR.get(reso.attr_abbrev, reso.attr_abbrev)\n per = Period(parsed, freq=freq)\n start, end = (per.start_time, per.end_time)\n start = start.as_unit(self.unit)\n end = end.as_unit(self.unit)\n start = start.tz_localize(parsed.tzinfo)\n end = end.tz_localize(parsed.tzinfo)\n if parsed.tzinfo is not None:\n if self.tz is None:\n raise ValueError('The index must be timezone aware when indexing with a date string with a UTC offset')\n return (start, end)", + "docstring": "Calculate datetime bounds for parsed time string and its resolution. Parameters ---------- reso : Resolution Resolution provided by parsed string. parsed : datetime Datetime from parsed string. 
Returns ------- lower, upper: pd.Timestamp", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\datetimes.py", + "ast_data": "FunctionDef name:_parsed_string_to_bounds arg:self arg:reso arg:parsed arguments arg arg arg Assign Call Assign Call Assign Assign Call Assign Call Assign Call Assign Call If Compare If Compare Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "total_variation", + "source_code": "@tf_export('image.total_variation')\n@dispatch.add_dispatch_support\ndef total_variation(images, name=None):\n with ops.name_scope(name, 'total_variation'):\n ndims = images.get_shape().ndims\n if ndims == 3:\n pixel_dif1 = images[1:, :, :] - images[:-1, :, :]\n pixel_dif2 = images[:, 1:, :] - images[:, :-1, :]\n sum_axis = None\n elif ndims == 4:\n pixel_dif1 = images[:, 1:, :, :] - images[:, :-1, :, :]\n pixel_dif2 = images[:, :, 1:, :] - images[:, :, :-1, :]\n sum_axis = [1, 2, 3]\n else:\n raise ValueError(\"'images' must be either 3 or 4-dimensional.\")\n tot_var = math_ops.reduce_sum(math_ops.abs(pixel_dif1), axis=sum_axis) + math_ops.reduce_sum(math_ops.abs(pixel_dif2), axis=sum_axis)\n return tot_var", + "docstring": "Calculate and return the total variation for one or more images. The total variation is the sum of the absolute differences for neighboring pixel-values in the input images. This measures how much noise is in the images. This can be used as a loss-function during optimization so as to suppress noise in images. If you have a batch of images, then you should calculate the scalar loss-value as the sum: This implements the anisotropic 2-D version of the formula described here: Args: images: 4-D Tensor of shape or 3-D Tensor of shape . name: A name for the operation (optional). Raises: ValueError: if images.shape is not a 3-D or 4-D vector. Returns: The total variation of . If was 4-D, return a 1-D float Tensor of shape with the total variation for each image in the batch. 
If was 3-D, return a scalar float with the total variation for that image.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py", + "ast_data": "FunctionDef name:total_variation arg:images arg:name arguments arg arg With Call Assign Call If Compare Assign Assign Assign If Compare Assign Assign Assign Raise Call Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "ExitGradientColocation", + "source_code": "def ExitGradientColocation(self, op: ops.Operation, gradient_uid):\n if self._outer_context:\n self._outer_context.ExitGradientColocation(op, gradient_uid)", + "docstring": "Start building a gradient colocated with an op.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", + "ast_data": "FunctionDef name:ExitGradientColocation arg:self arg:op arg:gradient_uid arguments arg arg arg If Call" + }, + { + "library": "tensorflow", + "name": "do_encode", + "source_code": "def do_encode(self, extension_type_spec_value, encode_fn):\n type_spec_class_name = type_spec_registry.get_name(type(extension_type_spec_value))\n type_state = extension_type_spec_value._serialize()\n num_flat_components = len(nest.flatten(extension_type_spec_value._component_specs, expand_composites=True))\n encoded_type_spec = struct_pb2.StructuredValue()\n encoded_type_spec.type_spec_value.CopyFrom(struct_pb2.TypeSpecProto(type_spec_class=struct_pb2.TypeSpecProto.EXTENSION_TYPE_SPEC, type_state=encode_fn(type_state), type_spec_class_name=type_spec_class_name, num_flat_components=num_flat_components))\n return encoded_type_spec", + "docstring": "Returns an encoded proto for the given .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py", + "ast_data": "FunctionDef name:do_encode arg:self arg:extension_type_spec_value arg:encode_fn arguments arg arg arg Assign Call Call Assign Call Assign Call Call Assign Call Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_solve_svd_design_matrix", + "source_code": "def _solve_svd_design_matrix(self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y):\n w = (singvals_sq + alpha) ** (-1) - alpha ** (-1)\n if self.fit_intercept:\n normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)\n intercept_dim = _find_smallest_angle(normalized_sw, U)\n w[intercept_dim] = -alpha ** (-1)\n c = np.dot(U, self._diag_dot(w, UT_y)) + alpha ** (-1) * y\n G_inverse_diag = self._decomp_diag(w, U) + alpha ** (-1)\n if len(y.shape) != 1:\n G_inverse_diag = G_inverse_diag[:, np.newaxis]\n return (G_inverse_diag, c)", + "docstring": "Compute dual coefficients and diagonal of G^-1. 
Used when we have an SVD decomposition of X (n_samples > n_features and X is dense).", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py", + "ast_data": "FunctionDef name:_solve_svd_design_matrix arg:self arg:alpha arg:y arg:sqrt_sw arg:X_mean arg:singvals_sq arg:U arg:UT_y arguments arg arg arg arg arg arg arg arg Assign If Assign Call Assign Call Assign Assign Call Call Assign Call If Compare Call Assign Return return:yes" + }, + { + "library": "scipy", + "name": "nnlf", + "source_code": "def nnlf(self, theta, x):\n loc, scale, args = self._unpack_loc_scale(theta)\n if not self._argcheck(*args) or scale <= 0:\n return inf\n x = (asarray(x) - loc) / scale\n n_log_scale = len(x) * log(scale)\n if np.any(~self._support_mask(x, *args)):\n return inf\n return self._nnlf(x, *args) + n_log_scale", + "docstring": "Negative loglikelihood function. Notes ----- This is `theta` are the parameters (including loc and scale).", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:nnlf arg:self arg:theta arg:x arguments arg arg arg Assign Call If BoolOp Call Compare Return return:yes Assign Call Assign Call Call If Call Call Return return:yes Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_set_joinable_configs", + "source_code": "def _set_joinable_configs(self) -> None:\n assert len(self._joinables) > 0\n is_first_joinable = True\n for joinable in self._joinables:\n joinable._join_config = _JoinConfig(enable=self._enable, throw_on_early_termination=self._throw_on_early_termination, is_first_joinable=is_first_joinable)\n is_first_joinable = False", + "docstring": "Set the :class: of each participating :class:.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py", + "ast_data": "FunctionDef name:_set_joinable_configs arg:self arguments arg Compare Call Assign For Assign Call Assign" + }, + { + "library": "pytorch", + "name": "arg_constraints", + "source_code": "@property\ndef arg_constraints(self) -> dict[str, constraints.Constraint]:\n raise NotImplementedError", + "docstring": "Returns a dictionary from argument names to :class: objects that should be satisfied by each argument of this distribution. Args that are not tensors need not appear in this dict.", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\distribution.py", + "ast_data": "FunctionDef name:arg_constraints arg:self arguments arg Raise" + }, + { + "library": "tensorflow", + "name": "validate_args", + "source_code": "@property\ndef validate_args(self):\n return self._validate_args", + "docstring": "Returns True if Tensor arguments will be validated.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py", + "ast_data": "FunctionDef name:validate_args arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "is_autocast_enabled", + "source_code": "def is_autocast_enabled(both: bool=True) -> bool:\n if TYPE_CHECKING:\n return False\n if not torch_version_ge(1, 10, 2):\n return False\n if both:\n if torch_version_ge(2, 4):\n return torch.is_autocast_enabled() or torch.is_autocast_enabled('cpu')\n else:\n return torch.is_autocast_enabled() or torch.is_autocast_cpu_enabled()\n return torch.is_autocast_enabled()", + "docstring": "Check if torch autocast is enabled. 
Args: both: if True will consider autocast region for both types of devices Returns: Return a Bool, will always return False for a torch without support, otherwise will be: if both is True . If both is False will return just .", + "type": "function", + "file_path": "kornia\\kornia\\utils\\helpers.py", + "ast_data": "FunctionDef name:is_autocast_enabled arg:both arguments arg If Return return:yes If Call Return return:yes If If Call Return return:yes BoolOp Call Call Return return:yes BoolOp Call Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "LassoSelector", + "source_code": "class LassoSelector(_SelectorWidget):\n\n def __init__(self, ax, onselect=None, *, useblit=True, props=None, button=None):\n super().__init__(ax, onselect, useblit=useblit, button=button)\n self.verts = None\n props = {**(props if props is not None else {}), 'animated': self.useblit, 'visible': False}\n line = Line2D([], [], **props)\n self.ax.add_line(line)\n self._selection_artist = line\n\n def _press(self, event):\n self.verts = [self._get_data(event)]\n self._selection_artist.set_visible(True)\n\n def _release(self, event):\n if self.verts is not None:\n self.verts.append(self._get_data(event))\n self.onselect(self.verts)\n self._selection_artist.set_data([[], []])\n self._selection_artist.set_visible(False)\n self.verts = None\n\n def _onmove(self, event):\n if self.verts is None:\n return\n self.verts.append(self._get_data(event))\n self._selection_artist.set_data(list(zip(*self.verts)))\n self.update()", + "docstring": "Selection curve of an arbitrary shape. For the selector to remain responsive you must keep a reference to it. The selected path can be used in conjunction with to select data points from an image. In contrast to , is written with an interface similar to and , and will continue to interact with the Axes until disconnected. Example usage:: ax = plt.subplot() ax.plot(x, y) def onselect(verts): print(verts) lasso = LassoSelector(ax, onselect) Parameters ---------- ax : The parent Axes for the widget. onselect : function, optional Whenever the lasso is released, the *onselect* function is called and passed the vertices of the selected path. useblit : bool, default: True Whether to use blitting for faster drawing (if supported by the backend). See the tutorial :ref: for details. props : dict, optional Properties with which the line is drawn, see for valid properties. 
Default values are defined in `.MouseButton.MouseButton`, which corresponds to all buttons.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "ClassDef name:LassoSelector FunctionDef name:__init__ arg:self arg:ax arg:onselect arguments arg arg arg arg arg arg Call Call Assign Assign Compare Assign Call Call Assign FunctionDef name:_press arg:self arg:event arguments arg arg Assign Call Call FunctionDef name:_release arg:self arg:event arguments arg arg If Compare Call Call Call Call Call Assign FunctionDef name:_onmove arg:self arg:event arguments arg arg If Compare Return return:no Call Call Call Call Call Call" + }, + { + "library": "django", + "name": "fid", + "source_code": "@property\ndef fid(self):\n return capi.get_fid(self.ptr)", + "docstring": "Return the feature identifier.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py", + "ast_data": "FunctionDef name:fid arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "close", + "source_code": "@async_unsafe\ndef close(self):\n self.validate_thread_sharing()\n self.run_on_commit = []\n if self.closed_in_transaction or self.connection is None:\n return\n try:\n self._close()\n finally:\n if self.in_atomic_block:\n self.closed_in_transaction = True\n self.needs_rollback = True\n else:\n self.connection = None", + "docstring": "Close the connection to the database.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\base.py", + "ast_data": "FunctionDef name:close arg:self arguments arg Call Assign If BoolOp Compare Return return:no Try Call If Assign Assign Assign" + }, + { + "library": "kornia", + "name": "num_cameras", + "source_code": "@property\ndef num_cameras(self) -> int:\n num_cameras: int = -1\n if self.intrinsics is not None:\n num_cameras = int(self.intrinsics.shape[1])\n return num_cameras", + "docstring": "Return the number of pinholes cameras per batch.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", + "ast_data": "FunctionDef name:num_cameras arg:self arguments arg If Compare Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "set_requires_all_reduce", + "source_code": "def set_requires_all_reduce(self, requires_all_reduce: bool, *, recurse: bool=True) -> None:\n self_module = cast(nn.Module, self)\n modules = list(self_module.modules()) if recurse else [self_module]\n for module in modules:\n if isinstance(module, FSDPModule):\n state = module._get_fsdp_state()\n if (fsdp_param_group := state._fsdp_param_group):\n fsdp_param_group.all_reduce_grads = requires_all_reduce", + "docstring": "Sets if the module should all-reduce gradients. 
This can be used to implement gradient accumulation with only reduce-scatter but not all-reduce for HSDP.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py", + "ast_data": "FunctionDef name:set_requires_all_reduce arg:self arg:requires_all_reduce arguments arg arg arg Assign Call Assign Call Call For If Call Assign Call If Assign" + }, + { + "library": "tensorflow", + "name": "_trackable_children", + "source_code": "def _trackable_children(self, save_type='checkpoint', **kwargs):\n if save_type == 'checkpoint':\n return {}\n return {f'trace_{n}': fn for n, fn in enumerate(self._list_all_concrete_functions_for_serialization())}", + "docstring": "For implementing .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py", + "ast_data": "FunctionDef name:_trackable_children arg:self arg:save_type arguments arg arg arg If Compare Return return:no Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_get_control_flow_context", + "source_code": "def _get_control_flow_context(self):\n return self._control_flow_context", + "docstring": "Returns the current control flow context. Returns: A context object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", + "ast_data": "FunctionDef name:_get_control_flow_context arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "global_variables", + "source_code": "@tf_export(v1=['global_variables'])\ndef global_variables(scope=None):\n return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope)", + "docstring": "Returns global variables. Global variables are variables that are shared across machines in a distributed environment. The constructor or automatically adds new variables to the graph collection . This convenience function returns the contents of that collection. An alternative to global variables are local variables. See @compatibility(TF2) Not compatible with eager execution and . In particular, Graph collections are deprecated in TF2. Instead please create a [tf.Module]( container for all your model state, including variables. You can then list all the variables in your through the attribute. @end_compatibility Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose attribute matches using . Items without a attribute are never returned if a scope is supplied. The choice of means that a without special tokens filters by prefix. Returns: A list of objects.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:global_variables arg:scope arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_rewrite_output_as_tensor", + "source_code": "def _rewrite_output_as_tensor(body_grad_graph, grad_output_slices):\n with body_grad_graph.as_default():\n new_output = tensor_conversion.convert_to_tensor_v2(grad_output_slices)\n idx = _get_tensor_index_in_iterable(body_grad_graph.structured_outputs, grad_output_slices)\n body_grad_graph.structured_outputs[idx] = new_output\n body_grad_graph.outputs = func_graph.flatten(body_grad_graph.structured_outputs)", + "docstring": "Rewrites grad_output_slices to be a Tensor output. Args: body_grad_graph: _WhileBodyGradFuncGraph. 
grad_output_slices: IndexedSlices output of body_grad_graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2_indexed_slices_rewriter.py", + "ast_data": "FunctionDef name:_rewrite_output_as_tensor arg:body_grad_graph arg:grad_output_slices arguments arg arg With Call Assign Call Assign Call Assign Assign Call" + }, + { + "library": "scipy", + "name": "get_knots", + "source_code": "def get_knots(self):\n data = self._data\n k, n = (data[5], data[7])\n return data[8][k:n - k]", + "docstring": "Return positions of interior knots of the spline. Internally, the knot vector contains `` additional boundary knots.", + "type": "method", + "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py", + "ast_data": "FunctionDef name:get_knots arg:self arguments arg Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_tensor_from_tensor_info", + "source_code": "@tf_export(v1=['saved_model.get_tensor_from_tensor_info', 'saved_model.utils.get_tensor_from_tensor_info'])\n@deprecation.deprecated(None, _DEPRECATION_MSG)\ndef get_tensor_from_tensor_info(tensor_info, graph=None, import_scope=None):\n graph = graph or ops.get_default_graph()\n\n def _get_tensor(name):\n return graph.get_tensor_by_name(ops.prepend_name_scope(name, import_scope=import_scope))\n encoding = tensor_info.WhichOneof('encoding')\n if encoding == 'name':\n return _get_tensor(tensor_info.name)\n elif encoding == 'coo_sparse':\n return sparse_tensor.SparseTensor(_get_tensor(tensor_info.coo_sparse.indices_tensor_name), _get_tensor(tensor_info.coo_sparse.values_tensor_name), _get_tensor(tensor_info.coo_sparse.dense_shape_tensor_name))\n elif encoding == 'composite_tensor':\n spec_proto = struct_pb2.StructuredValue(type_spec_value=tensor_info.composite_tensor.type_spec)\n spec = nested_structure_coder.decode_proto(spec_proto)\n components = [_get_tensor(component.name) for component in tensor_info.composite_tensor.components]\n return nest.pack_sequence_as(spec, components, expand_composites=True)\n else:\n raise ValueError(f'Invalid TensorInfo.encoding: {encoding}. Expected `coo_sparse`, `composite_tensor`, or `name` for a dense tensor.')", + "docstring": "Returns the Tensor or CompositeTensor described by a TensorInfo proto. Args: tensor_info: A TensorInfo proto describing a Tensor or SparseTensor or CompositeTensor. graph: The tf.Graph in which tensors are looked up. If None, the current default graph is used. import_scope: If not None, names in are prefixed with this string before lookup. Returns: The Tensor or SparseTensor or CompositeTensor in described by . Raises: KeyError: If does not correspond to a tensor in . 
ValueError: If is malformed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\utils_impl.py", + "ast_data": "FunctionDef name:get_tensor_from_tensor_info arg:tensor_info arg:graph arg:import_scope arguments arg arg arg Assign BoolOp Call FunctionDef name:_get_tensor arg:name arguments arg Return return:yes Call Call Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Call Call Call If Compare Assign Call Assign Call Assign Call Return return:yes Call Raise Call Call Call" + }, + { + "library": "numpy", + "name": "MethodsV1NoComplex", + "source_code": "class MethodsV1NoComplex(Benchmark):\n params = [['__floordiv__', '__mod__'], [dt for dt in TYPES1 if not dt.startswith('complex')]]\n param_names = ['methods', 'npdtypes']\n timeout = 10\n\n def setup(self, methname, npdtypes):\n values = get_squares_().get(npdtypes)\n self.xargs = [values[0], values[1]]\n\n def time_ndarray_meth(self, methname, npdtypes):\n getattr(operator, methname)(*self.xargs)", + "docstring": "Benchmark for the methods which take an argument", + "type": "class", + "file_path": "numpy\\benchmarks\\benchmarks\\bench_ufunc.py", + "ast_data": "ClassDef name:MethodsV1NoComplex Assign Call Assign Assign FunctionDef name:setup arg:self arg:methname arg:npdtypes arguments arg arg arg Assign Call Call Assign FunctionDef name:time_ndarray_meth arg:self arg:methname arg:npdtypes arguments arg arg arg Call Call" + }, + { + "library": "matplotlib", + "name": "_get_font_family_and_reduced", + "source_code": "@classmethod\ndef _get_font_family_and_reduced(cls):\n ff = mpl.rcParams['font.family']\n ff_val = ff[0].lower() if len(ff) == 1 else None\n if len(ff) == 1 and ff_val in cls._font_families:\n return (ff_val, False)\n elif len(ff) == 1 and ff_val in cls._font_preambles:\n return (cls._font_types[ff_val], True)\n else:\n _log.info('font.family must be one of (%s) when text.usetex is True. serif will be used by default.', ', '.join(cls._font_families))\n return ('serif', False)", + "docstring": "Return the font family name and whether the font is reduced.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py", + "ast_data": "FunctionDef name:_get_font_family_and_reduced arg:cls arguments arg Assign Assign Compare Call Call If BoolOp Compare Call Compare Return return:yes If BoolOp Compare Call Compare Return return:yes Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "inverse", + "source_code": "def inverse(self, y, name='inverse'):\n return self._call_inverse(y, name)", + "docstring": "Returns the inverse evaluation, i.e., X = g^{-1}(Y). Args: y: . The input to the \"inverse\" evaluation. name: The name to give this op. Returns: , if this bijector is injective. If not injective, returns the k-tuple containing the unique points such that . Raises: TypeError: if is specified and is not . 
NotImplementedError: if is not implemented.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py", + "ast_data": "FunctionDef name:inverse arg:self arg:y arg:name arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "innermost_fn", + "source_code": "def innermost_fn(fn):\n unaltered_fn = fn\n while hasattr(unaltered_fn, '_torchdynamo_orig_callable'):\n unaltered_fn = unaltered_fn._torchdynamo_orig_callable\n assert callable(unaltered_fn), f'A callable function is expected, but {type(unaltered_fn)} is provided.'\n return unaltered_fn", + "docstring": "In case of nesting of _TorchDynamoContext calls, find the innermost function. TorchDynamo caches on fn.__code__ object, so its necessary to find the innermost function to pass on the optimize, run, disable etc.", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\eval_frame.py", + "ast_data": "FunctionDef name:innermost_fn arg:fn arguments arg Assign While Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "origin", + "source_code": "@property\ndef origin(self):\n return TransformPoint(self, 'origin')", + "docstring": "Coordinates of the raster origin.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py", + "ast_data": "FunctionDef name:origin arg:self arguments arg Return return:yes Call" + }, + { + "library": "sphinx", + "name": "fresh_env_used", + "source_code": "@property\ndef fresh_env_used(self) -> bool | None:\n return self._fresh_env_used", + "docstring": "True/False as to whether a new environment was created for this build, or None if the environment has not been initialised yet.", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:fresh_env_used arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_lookup_dependency", + "source_code": "def _lookup_dependency(self, name, cached_dependencies=None):\n unconditional = super(LossScale, self)._lookup_dependency(name, cached_dependencies)\n if unconditional is not None:\n return unconditional\n if context.executing_eagerly():\n graph_key = None\n else:\n graph = ops.get_default_graph()\n graph_key = graph._graph_key\n return self._weights.get((name, graph_key), None)", + "docstring": "From Trackable. 
Find a weight in the current graph.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py", + "ast_data": "FunctionDef name:_lookup_dependency arg:self arg:name arg:cached_dependencies arguments arg arg arg Assign Call Call If Compare Return return:yes If Call Assign Assign Call Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "flip_cutlass_layout", + "source_code": "@staticmethod\ndef flip_cutlass_layout(cutlass_layout: 'cutlass_lib.LayoutType') -> 'cutlass_lib.LayoutType':\n assert cutlass_utils.try_import_cutlass()\n import cutlass_library.library as cutlass_lib\n if cutlass_layout == cutlass_lib.LayoutType.RowMajor:\n return cutlass_lib.LayoutType.ColumnMajor\n else:\n return cutlass_lib.LayoutType.RowMajor", + "docstring": "Helper method: Flips a given cutlass layout (cutlass_lib.LayoutType) from RowMajor to ColumnMajor or vice versa", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py", + "ast_data": "FunctionDef name:flip_cutlass_layout arg:cutlass_layout arguments arg Call If Compare Return return:yes Return return:yes" + }, + { + "library": "numpy", + "name": "rjust", + "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_just_dispatcher)\ndef rjust(a, width, fillchar=' '):\n width = np.asanyarray(width)\n if not np.issubdtype(width.dtype, np.integer):\n raise TypeError(f\"unsupported type {width.dtype} for operand 'width'\")\n a = np.asanyarray(a)\n fillchar = np.asanyarray(fillchar)\n if np.any(str_len(fillchar) != 1):\n raise TypeError('The fill character must be exactly one character long')\n if np.result_type(a, fillchar).char == 'T':\n return _rjust(a, width, fillchar)\n fillchar = fillchar.astype(a.dtype, copy=False)\n width = np.maximum(str_len(a), width)\n shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape)\n out_dtype = f'{a.dtype.char}{width.max()}'\n out = np.empty_like(a, shape=shape, dtype=out_dtype)\n return _rjust(a, width, fillchar, out=out)", + "docstring": "Return an array with the elements of right-justified in a string of length . Parameters ---------- a : array-like, with ``width >> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.rjust(a, width=3) array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.rjust(a, width=9) array([' aAaAaA', ' aA ', ' abBABba'], dtype=' Series:\n result = value_counts(np.asarray(self), dropna=dropna)\n result.index = result.index.astype(self.dtype)\n return result", + "docstring": "Returns a Series containing counts of each interval. Parameters ---------- dropna : bool, default True Don't include counts of NaN. 
Returns ------- counts : Series See Also -------- Series.value_counts", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\interval.py", + "ast_data": "FunctionDef name:value_counts arg:self arg:dropna arguments arg arg Assign Call Call Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_sync_debug_mode", + "source_code": "def get_sync_debug_mode() -> int:\n _lazy_init()\n return torch._C._cuda_get_sync_debug_mode()", + "docstring": "Return current value of debug mode for cuda synchronizing operations.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\__init__.py", + "ast_data": "FunctionDef name:get_sync_debug_mode arguments Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "construct_array_type", + "source_code": "def construct_array_type(self) -> type_t[BaseStringArray]:\n from pandas.core.arrays.string_arrow import ArrowStringArray, ArrowStringArrayNumpySemantics\n if self.storage == 'python' and self._na_value is libmissing.NA:\n return StringArray\n elif self.storage == 'pyarrow' and self._na_value is libmissing.NA:\n return ArrowStringArray\n elif self.storage == 'python':\n return StringArrayNumpySemantics\n else:\n return ArrowStringArrayNumpySemantics", + "docstring": "Return the array type associated with this dtype. Returns ------- type", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\string_.py", + "ast_data": "FunctionDef name:construct_array_type arg:self arguments arg If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Return return:yes If Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, module, method_name=None, **kwargs):\n super(ModuleWrapper, self).__init__(**kwargs)\n if method_name is None:\n if hasattr(module, '__call__'):\n method_name = '__call__'\n elif hasattr(module, 'call'):\n method_name = 'call'\n if method_name is None or not hasattr(module, method_name):\n raise ValueError('{} is not defined on object {}'.format(method_name, module))\n self._module = module\n self._method_name = method_name\n method = getattr(module, method_name)\n method_arg_spec = tf_inspect.getfullargspec(method)\n self._expects_training_arg = 'training' in method_arg_spec.args or method_arg_spec.varkw is not None\n self._expects_mask_arg = 'mask' in method_arg_spec.args or method_arg_spec.varkw is not None", + "docstring": "Initializes the wrapper Layer for this module. Args: module: The instance to be wrapped. method_name: (Optional) str. The name of the method to use as the forward pass of the module. If not set, defaults to '__call__' if defined, or 'call'. **kwargs: Additional keywrod arguments. See . Raises: ValueError: If is not defined on .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:module arg:method_name arguments arg arg arg arg Call Call If Compare If Call Assign If Call Assign If BoolOp Compare Call Raise Call Call Assign Assign Assign Call Assign Call Assign BoolOp Compare Compare Assign BoolOp Compare Compare" + }, + { + "library": "pytorch", + "name": "cdf", + "source_code": "def cdf(self, value: Tensor) -> Tensor:\n raise NotImplementedError", + "docstring": "Returns the cumulative density/mass function evaluated at . 
Args: value (Tensor):", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\distribution.py", + "ast_data": "FunctionDef name:cdf arg:self arg:value arguments arg arg Raise" + }, + { + "library": "scipy", + "name": "write_record", + "source_code": "def write_record(self, *items):\n items = tuple((np.asarray(item) for item in items))\n total_size = sum((item.nbytes for item in items))\n nb = np.array([total_size], dtype=self._header_dtype)\n nb.tofile(self._fp)\n for item in items:\n item.tofile(self._fp)\n nb.tofile(self._fp)", + "docstring": "Write a record (including sizes) to the file. Parameters ---------- *items : array_like The data arrays to write. Notes ----- Writes data items to a file:: write_record(a.T, b.T, c.T, ...) write(1) a, b, c, ... Note that data in multidimensional arrays is written in row-major order --- to make them read correctly by Fortran programs, you need to transpose the arrays yourself when writing them.", + "type": "method", + "file_path": "scipy\\scipy\\io\\_fortran.py", + "ast_data": "FunctionDef name:write_record arg:self arguments arg arg Assign Call Call Assign Call Assign Call Call For Call Call" + }, + { + "library": "matplotlib", + "name": "set_pickradius", + "source_code": "def set_pickradius(self, pickradius):\n if not isinstance(pickradius, Real) or pickradius < 0:\n raise ValueError('pick radius should be a distance')\n self._pickradius = pickradius", + "docstring": "Set the pick radius used for containment tests. See for more details. Parameters ---------- pickradius : float Pick radius, in points.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\lines.py", + "ast_data": "FunctionDef name:set_pickradius arg:self arg:pickradius arguments arg arg If BoolOp Call Compare Raise Call Assign" + }, + { + "library": "tensorflow", + "name": "unstack", + "source_code": "def unstack(self, value, name=None):\n tensors = array_ops_stack.unstack(value, name=name)\n if len(tensors) > len(self._tensor_array) and (not self._dynamic_size):\n raise ValueError('Cannot unstack %d tensors into a TensorArray of static size %d ' % (len(tensors), len(self._tensor_array)))\n self._tensor_array = tensors\n return self.parent()", + "docstring": "See TensorArray.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:unstack arg:self arg:value arg:name arguments arg arg arg Assign Call If BoolOp Compare Call Call Raise Call Call Call Assign Return return:yes Call" + }, + { + "library": "kornia", + "name": "translation", + "source_code": "@property\ndef translation(self) -> Vector2 | Parameter:\n return self._translation", + "docstring": "Return the underlying translation vector of shape :math:.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py", + "ast_data": "FunctionDef name:translation arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "set_reference_quantized_module", + "source_code": "def set_reference_quantized_module(self, reference_quantized_module: type[torch.nn.Module]) -> BackendPatternConfig:\n self.reference_quantized_module = reference_quantized_module\n return self", + "docstring": "Set the module that represents the reference quantized implementation for this pattern's root module. 
For more detail, see :func:.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py", + "ast_data": "FunctionDef name:set_reference_quantized_module arg:self arg:reference_quantized_module arguments arg arg Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "resolve_data", + "source_code": "@abc.abstractmethod\ndef resolve_data(self, write_item: WriteItem) -> Union[torch.Tensor, io.BytesIO]:\n pass", + "docstring": "Transform and prepare `` and apply any transformation (such as serialization) prior to the storage layer consuming it. Called on each rank multiple times, at least once per WriteItem in the final SavePlan. This method should be idempotent and thread-save. StorageWriter implementations are free to call it as frequently as they need. Any transformation that allocates memory should be lazily done when his method is called in order to reduce peak memory required by checkpointing. When returning tensors, they can be on any device or format, they can be views too. It's the storage layer responsibility to figure out how to save them.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py", + "ast_data": "FunctionDef name:resolve_data arg:self arg:write_item arguments arg arg" + }, + { + "library": "scipy", + "name": "nnz", + "source_code": "@property\ndef nnz(self) -> int:\n return self._getnnz()", + "docstring": "Number of stored values, including explicit zeros. See also -------- count_nonzero : Number of non-zero entries", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_base.py", + "ast_data": "FunctionDef name:nnz arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "custom_from_mask", + "source_code": "def custom_from_mask(module, name, mask):\n CustomFromMask.apply(module, name, mask)\n return module", + "docstring": "Prune tensor corresponding to parameter called `` on which pruning will act. mask (Tensor): binary mask to be applied to the parameter. Returns: module (nn.Module): modified (i.e. pruned) version of the input module Examples: >>> from torch.nn.utils import prune >>> m = prune.custom_from_mask( ... nn.Linear(5, 3), name='bias', mask=torch.tensor([0, 1, 0]) ... ) >>> print(m.bias_mask) tensor([0., 1., 0.])", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\prune.py", + "ast_data": "FunctionDef name:custom_from_mask arg:module arg:name arg:mask arguments arg arg arg Call Return return:yes" + }, + { + "library": "pandas", + "name": "construct_array_type", + "source_code": "@classmethod\ndef construct_array_type(cls) -> type_t[Categorical]:\n from pandas import Categorical\n return Categorical", + "docstring": "Return the array type associated with this dtype. Returns ------- type", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", + "ast_data": "FunctionDef name:construct_array_type arg:cls arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "zeros", + "source_code": "@dispatch.dispatch_for_api(array_ops.zeros)\ndef zeros(shape: dynamic_ragged_shape.DynamicRaggedShape, dtype=dtypes.float32, name=None, layout=None) -> ragged_tensor.RaggedOrDense:\n if layout is not None and (not layout.is_fully_replicated()):\n raise ValueError(f'RaggedTensor only allows replicated layout. 
got {layout}')\n flat_values = array_ops.zeros(shape.inner_shape, dtype=dtype, name=name, layout=layout)\n return shape._add_row_partitions(flat_values)", + "docstring": "Returns ones shaped like x.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py", + "ast_data": "FunctionDef name:zeros arg:shape arg:dtype arg:name arg:layout arguments arg arg arg arg If BoolOp Compare Call Raise Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "latency_experiment", + "source_code": "def latency_experiment(args, model_iter_fn, model, example_inputs, mark, **kwargs):\n timings = np.zeros((args.repeat,), np.float64)\n should_randomize_input = args.randomize_input\n import contextlib\n from torch._inductor.utils import maybe_profile\n\n @contextlib.contextmanager\n def maybe_mark_profile(*args, **kwargs):\n prof: torch.profiler.profile = kwargs.pop('p', None)\n mark = kwargs.pop('mark', None)\n if prof:\n with torch.profiler.record_function(mark):\n yield\n else:\n yield\n times = args.iterations_per_run\n with maybe_profile(args.export_profiler_trace, **args.profile_details) as p:\n for rep in trange(args.repeat, desc='running benchmark'):\n inputs = randomize_input(copy.deepcopy(example_inputs)) if should_randomize_input else example_inputs\n maybe_mark_step(args)\n with maybe_mark_profile(p=p, mark=mark):\n timings[rep], actual_output = timed(model, model_iter_fn, inputs, return_result=True, times=times, collect_outputs=args.collect_outputs)\n if args.export_profiler_trace:\n name = args.profiler_trace_name + '_' + model.name\n if hasattr(args, 'rank'):\n name += f'_rank_{args.rank}'\n name += '.json'\n name = os.path.join(torch._dynamo.config.base_dir, name)\n p.export_chrome_trace(name)\n return timings", + "docstring": "Measure latency on a specific backend.", + "type": "function", + "file_path": "pytorch\\benchmarks\\dynamo\\common.py", + "ast_data": "FunctionDef name:latency_experiment arg:args arg:model_iter_fn arg:model arg:example_inputs arg:mark arguments arg arg arg arg arg arg Assign Call Assign FunctionDef name:maybe_mark_profile arguments arg arg Call Assign Call If With Call Assign With Call For Call Assign Call Call Call With Call Assign Call If Assign If Call Assign Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "add_tools_to_manager", + "source_code": "def add_tools_to_manager(toolmanager, tools=default_tools):\n for name, tool in tools.items():\n toolmanager.add_tool(name, tool)", + "docstring": "Add multiple tools to a . Parameters ---------- toolmanager : Manager to which the tools are added. tools : {str: class_like}, optional The tools to add in a {name: tool} dict, see for more info.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", + "ast_data": "FunctionDef name:add_tools_to_manager arg:toolmanager arg:tools arguments arg arg For Call Call" + }, + { + "library": "scikit-learn", + "name": "DataDimensionalityWarning", + "source_code": "class DataDimensionalityWarning(UserWarning):\n pass", + "docstring": "Custom warning to notify potential issues with data dimensionality. For example, in random projection, this warning is raised when the number of components, which quantifies the dimensionality of the target projection space, is higher than the number of features, which quantifies the dimensionality of the original source space, to imply that the dimensionality of the problem will not be reduced. .. 
versionchanged:: 0.18 Moved from sklearn.utils.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\exceptions.py", + "ast_data": "ClassDef name:DataDimensionalityWarning" + }, + { + "library": "tensorflow", + "name": "get_all_plugin_assets", + "source_code": "def get_all_plugin_assets(graph=None):\n if graph is None:\n graph = ops.get_default_graph()\n out = []\n for name in graph.get_collection(_PLUGIN_ASSET_PREFIX):\n collection = graph.get_collection(_PLUGIN_ASSET_PREFIX + name)\n if len(collection) != 1:\n raise ValueError('Collection for %s had %d items, expected 1' % (name, len(collection)))\n out.append(collection[0])\n return out", + "docstring": "Retrieve all PluginAssets stored in the graph collection. Args: graph: Optionally, the graph to get assets from. If unspecified, the default graph is used. Returns: A list with all PluginAsset instances in the graph. Raises: ValueError: if we unexpectedly find a collection with the wrong number of PluginAssets.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\summary\\plugin_asset.py", + "ast_data": "FunctionDef name:get_all_plugin_assets arg:graph arguments arg If Compare Assign Call Assign For Call Assign Call If Compare Call Raise Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "setdefault", + "source_code": "def setdefault(self, key=None, default=None, kwargs=None):\n if key is None:\n key = self._key()\n kwargs = kwargs or {}\n if default is None and key not in self:\n default = self.default_factory(**kwargs)\n return weakref.WeakKeyDictionary.setdefault(self, key, default)", + "docstring": "Sets the default value if key is not in dict, and returns the value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:setdefault arg:self arg:key arg:default arg:kwargs arguments arg arg arg arg If Compare Assign Call Assign BoolOp If BoolOp Compare Compare Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "classify_jobs", + "source_code": "def classify_jobs(all_job_names: list[str], sha_grid: Any, filtered_jobs_names: set[str]) -> tuple[list[JobStatus], list[Any]]:\n job_data = map_job_data(all_job_names, sha_grid)\n job_statuses: list[JobStatus] = []\n for job in job_data:\n job_statuses.append(JobStatus(job, job_data[job]))\n jobs_to_alert_on = []\n flaky_jobs = []\n for job_status in job_statuses:\n if job_status.job_name not in filtered_jobs_names:\n continue\n if job_status.should_alert():\n jobs_to_alert_on.append(job_status)\n flaky_jobs.extend(job_status.flaky_jobs)\n return (jobs_to_alert_on, flaky_jobs)", + "docstring": "Creates Job Statuses which has the logic for if need to alert or if there's flaky jobs. Classifies jobs into jobs to alert on and flaky jobs. 
:param all_job_names: list of all job names as returned by the HUD :param sha_grid: list of all job data as returned by the HUD (parallel index to all_job_names) :param filtered_jobs_names: set of job names to actually consider :return:", + "type": "function", + "file_path": "pytorch\\tools\\alerts\\create_alerts.py", + "ast_data": "FunctionDef name:classify_jobs arg:all_job_names arg:sha_grid arg:filtered_jobs_names arguments arg arg arg Assign Call For Call Call Assign Assign For If Compare If Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "choose_dispatcher", + "source_code": "def choose_dispatcher(needs_autograd, aot_config):\n if aot_config.is_export:\n CompileEventLogger.try_add_pt2_compile('backend_compile', dispatch_mode='export')\n return partial(aot_dispatch_export, needs_autograd=needs_autograd)\n elif needs_autograd and (not aot_config.pre_dispatch):\n CompileEventLogger.try_add_pt2_compile('backend_compile', dispatch_mode='autograd')\n return aot_dispatch_autograd\n else:\n CompileEventLogger.try_add_pt2_compile('backend_compile', dispatch_mode='inference')\n return aot_dispatch_base", + "docstring": "Pick a dispatcher based on the config rules.", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\aot_autograd.py", + "ast_data": "FunctionDef name:choose_dispatcher arg:needs_autograd arg:aot_config arguments arg arg If Call Return return:yes Call If BoolOp Call Return return:yes Call Return return:yes" + }, + { + "library": "django", + "name": "resolve_model_init_order", + "source_code": "def resolve_model_init_order(self):\n converter = connections[self.db].introspection.identifier_converter\n model_init_fields = [field for column_name, field in self.model_fields.items() if column_name in self.columns]\n annotation_fields = [(column, pos) for pos, column in enumerate(self.columns) if column not in self.model_fields]\n model_init_order = [self.columns.index(converter(f.column)) for f in model_init_fields]\n model_init_names = [f.attname for f in model_init_fields]\n return (model_init_names, model_init_order, annotation_fields)", + "docstring": "Resolve the init field names and value positions.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:resolve_model_init_order arg:self arguments arg Assign Assign Call Compare Assign Call Compare Assign Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_mode", + "source_code": "@tf_contextlib.contextmanager\ndef _mode(self, mode):\n ctx = self._thread_local_data\n old_is_eager = ctx.is_eager\n ctx.is_eager = mode == EAGER_MODE\n if mode == EAGER_MODE:\n self.context_switches.push(False, eager_mode, None)\n try:\n yield\n finally:\n ctx.is_eager = old_is_eager\n if mode == EAGER_MODE:\n self.context_switches.pop()", + "docstring": "A context manager to allow setting the mode to EAGER/GRAPH.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:_mode arg:self arg:mode arguments arg arg Assign Assign Assign Compare If Compare Call Try Assign If Compare Call" + }, + { + "library": "tensorflow", + "name": "PermissionDeniedError", + "source_code": "@tf_export('errors.PermissionDeniedError')\nclass PermissionDeniedError(OpError):\n\n def __init__(self, node_def, op, message, *args):\n super(PermissionDeniedError, self).__init__(node_def, op, message, PERMISSION_DENIED, *args)", + "docstring": "Raised when the caller does not have permission to 
run an operation. For example, running the operation could raise if it receives the name of a file for which the user does not have the read file permission.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", + "ast_data": "ClassDef name:PermissionDeniedError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call" + }, + { + "library": "tensorflow", + "name": "_from_compatible_tensor_list", + "source_code": "def _from_compatible_tensor_list(self, tensor_list: List['core_types.Symbol']) -> Any:\n return self._from_components(nest.pack_sequence_as(self._component_specs, tensor_list, expand_composites=True))", + "docstring": "Reconstructs a value from a compatible flat list of . Args: tensor_list: A flat list of , compatible with . (Caller is responsible for ensuring compatibility.) Returns: A value that is compatible with this .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", + "ast_data": "FunctionDef name:_from_compatible_tensor_list arg:self arg:tensor_list arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_generate_jit_forward_graph", + "source_code": "def _generate_jit_forward_graph(self):\n scripted_op_bench = torch.jit.script(self.op_bench)\n return scripted_op_bench.forward_consume", + "docstring": "generate a graph for the forward function via scripting", + "type": "method", + "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_pytorch.py", + "ast_data": "FunctionDef name:_generate_jit_forward_graph arg:self arguments arg Assign Call Return return:yes" + }, + { + "library": "kornia", + "name": "euler_from_quaternion", + "source_code": "def euler_from_quaternion(w: Tensor, x: Tensor, y: Tensor, z: Tensor) -> tuple[Tensor, Tensor, Tensor]:\n KORNIA_CHECK(w.shape == x.shape)\n KORNIA_CHECK(x.shape == y.shape)\n KORNIA_CHECK(y.shape == z.shape)\n yy = y * y\n sinr_cosp = 2.0 * (w * x + y * z)\n cosr_cosp = 1.0 - 2.0 * (x * x + yy)\n roll = sinr_cosp.atan2(cosr_cosp)\n sinp = 2.0 * (w * y - z * x)\n sinp = sinp.clamp(min=-1.0, max=1.0)\n pitch = sinp.asin()\n siny_cosp = 2.0 * (w * z + x * y)\n cosy_cosp = 1.0 - 2.0 * (yy + z * z)\n yaw = siny_cosp.atan2(cosy_cosp)\n return (roll, pitch, yaw)", + "docstring": "Convert a quaternion coefficients to Euler angles. Returned angles are in radians in XYZ convention. Args: w: quaternion :math: coefficient. x: quaternion :math: coefficient. y: quaternion :math: coefficient. z: quaternion :math: coefficient. 
Return: A tuple with euler angles, , .", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\conversions.py", + "ast_data": "FunctionDef name:euler_from_quaternion arg:w arg:x arg:y arg:z arguments arg arg arg arg Call Compare Call Compare Call Compare Assign Assign Assign Assign Call Assign Assign Call Assign Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "check", + "source_code": "def check(self, value):\n unsqueezed_value = value.unsqueeze(-1 - self.event_dim)\n result = self.base_constraint.check(unsqueezed_value)\n if value.dim() < self.event_dim:\n raise ValueError(f'Expected value.dim() >= {self.event_dim} but got {value.dim()}')\n num_dim_to_keep = value.dim() - self.event_dim\n result = result.reshape(result.shape[:num_dim_to_keep] + (-1,))\n result = result.all(-1)\n return result", + "docstring": "Check validity of `~torch.distribution.MixtureSameFamily` distribution.", + "type": "method", + "file_path": "pytorch\\torch\\distributions\\constraints.py", + "ast_data": "FunctionDef name:check arg:self arg:value arguments arg arg Assign Call Assign Call If Compare Call Raise Call Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_apply_fn", + "source_code": "def _apply_fn(dataset):\n return dataset.group_by_window(key_func=key_func, reduce_func=reduce_func, window_size=window_size, window_size_func=window_size_func)", + "docstring": "Function from to that applies the transformation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\grouping.py", + "ast_data": "FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):\n self._more_validate_params()\n return self._fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, coef_init=coef_init, intercept_init=intercept_init, sample_weight=sample_weight)", + "docstring": "Fit linear model with Stochastic Gradient Descent. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) Target values. coef_init : ndarray of shape (n_classes, n_features), default=None The initial coefficients to warm-start the optimization. intercept_init : ndarray of shape (n_classes,), default=None The initial intercept to warm-start the optimization. sample_weight : array-like, shape (n_samples,), default=None Weights applied to individual samples. If not provided, uniform weights are assumed. These weights will be multiplied with class_weight (passed through the constructor) if class_weight is specified. 
Returns ------- self : object Returns an instance of self.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py", + "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:coef_init arg:intercept_init arg:sample_weight arguments arg arg arg arg arg arg Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "update_dimension_fields", + "source_code": "def update_dimension_fields(self, instance, force=False, *args, **kwargs):\n has_dimension_fields = self.width_field or self.height_field\n if not has_dimension_fields or self.attname not in instance.__dict__:\n return\n file = getattr(instance, self.attname)\n if not file and (not force):\n return\n dimension_fields_filled = not (self.width_field and (not getattr(instance, self.width_field)) or (self.height_field and (not getattr(instance, self.height_field))))\n if dimension_fields_filled and (not force):\n return\n if file:\n width = file.width\n height = file.height\n else:\n width = None\n height = None\n if self.width_field:\n setattr(instance, self.width_field, width)\n if self.height_field:\n setattr(instance, self.height_field, height)", + "docstring": "Update field's width and height fields, if defined. This method is hooked up to model's post_init signal to update dimensions after instantiating a model instance. However, dimensions won't be updated if the dimensions fields are already populated. This avoids unnecessary recalculation when loading an object from the database. Dimensions can be forced to update with force=True, which is how ImageFileDescriptor.__set__ calls this method.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\files.py", + "ast_data": "FunctionDef name:update_dimension_fields arg:self arg:instance arg:force arguments arg arg arg arg arg Assign BoolOp If BoolOp Compare Return return:no Assign Call If BoolOp Return return:no Assign BoolOp BoolOp Call BoolOp Call If BoolOp Return return:no If Assign Assign Assign Assign If Call If Call" + }, + { + "library": "sphinx", + "name": "create_pygments_style_file", + "source_code": "def create_pygments_style_file(self) -> None:\n pyg_path = self._static_dir / 'pygments.css'\n with open(pyg_path, 'w', encoding='utf-8') as f:\n f.write(self.highlighter.get_stylesheet())\n if self.dark_highlighter:\n dark_path = self._static_dir / 'pygments_dark.css'\n with open(dark_path, 'w', encoding='utf-8') as f:\n f.write(self.dark_highlighter.get_stylesheet())", + "docstring": "Create a style file for pygments.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py", + "ast_data": "FunctionDef name:create_pygments_style_file arg:self arguments arg Assign With Call Call Call If Assign With Call Call Call" + }, + { + "library": "scipy", + "name": "_z_to_zinv", + "source_code": "@staticmethod\ndef _z_to_zinv(num, den):\n diff = len(num) - len(den)\n if diff > 0:\n den = np.hstack((np.zeros(diff), den))\n elif diff < 0:\n num = np.hstack((np.zeros(-diff), num))\n return (num, den)", + "docstring": "Change a transfer function from the variable to . Parameters ---------- num, den: 1d array_like Sequences representing the coefficients of the numerator and denominator polynomials, in order of descending degree of 'z'. 
That is, ``.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_ltisys.py", + "ast_data": "FunctionDef name:_z_to_zinv arg:num arg:den arguments arg arg Assign Call Call If Compare Assign Call Call If Compare Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "OneDeviceStrategy", + "source_code": "@tf_export('distribute.OneDeviceStrategy', v1=[])\nclass OneDeviceStrategy(distribute_lib.Strategy):\n\n def __init__(self, device):\n super(OneDeviceStrategy, self).__init__(OneDeviceExtended(self, device))\n distribute_lib.distribution_strategy_gauge.get_cell('V2').set('OneDeviceStrategy')\n\n def experimental_distribute_dataset(self, dataset, options=None):\n return super(OneDeviceStrategy, self).experimental_distribute_dataset(dataset, options)\n\n def distribute_datasets_from_function(self, dataset_fn, options=None):\n return super(OneDeviceStrategy, self).distribute_datasets_from_function(dataset_fn, options)\n\n def experimental_local_results(self, value):\n return super(OneDeviceStrategy, self).experimental_local_results(value)\n\n def run(self, fn, args=(), kwargs=None, options=None):\n return super(OneDeviceStrategy, self).run(fn, args, kwargs, options)\n\n def reduce(self, reduce_op, value, axis):\n return super(OneDeviceStrategy, self).reduce(reduce_op, value, axis)\n\n def scope(self):\n return super(OneDeviceStrategy, self).scope()", + "docstring": "A distribution strategy for running on a single device. Using this strategy will place any variables created in its scope on the specified device. Input distributed through this strategy will be prefetched to the specified device. Moreover, any functions called via will also be placed on the specified device as well. Typical usage of this strategy could be testing your code with the tf.distribute.Strategy API before switching to other strategies which actually distribute to multiple devices/machines. 
For example:", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\one_device_strategy.py", + "ast_data": "ClassDef name:OneDeviceStrategy FunctionDef name:__init__ arg:self arg:device arguments arg arg Call Call Call Call Call FunctionDef name:experimental_distribute_dataset arg:self arg:dataset arg:options arguments arg arg arg Return return:yes Call Call FunctionDef name:distribute_datasets_from_function arg:self arg:dataset_fn arg:options arguments arg arg arg Return return:yes Call Call FunctionDef name:experimental_local_results arg:self arg:value arguments arg arg Return return:yes Call Call FunctionDef name:run arg:self arg:fn arg:args arg:kwargs arg:options arguments arg arg arg arg arg Return return:yes Call Call FunctionDef name:reduce arg:self arg:reduce_op arg:value arg:axis arguments arg arg arg arg Return return:yes Call Call FunctionDef name:scope arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_get_variables_dir", + "source_code": "def _get_variables_dir(export_dir):\n return os.path.join(compat.as_text(export_dir), compat.as_text(constants.VARIABLES_DIRECTORY))", + "docstring": "Return variables sub-directory in the SavedModel.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model_experimental.py", + "ast_data": "FunctionDef name:_get_variables_dir arg:export_dir arguments arg Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "get_attr", + "source_code": "@compatibility(is_backward_compatible=True)\ndef get_attr(self, target: 'Target', args: tuple[Argument, ...], kwargs: dict[str, Any]) -> Proxy:\n assert isinstance(target, str)\n return self.tracer.create_proxy('get_attr', target, args, kwargs)", + "docstring": "Execute a `Node `__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation", + "type": "method", + "file_path": "pytorch\\torch\\fx\\interpreter.py", + "ast_data": "FunctionDef name:get_attr arg:self arg:target arg:args arg:kwargs arguments arg arg arg arg Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "float8_e5m2fnuz", + "source_code": "def float8_e5m2fnuz(self):\n _warn_typed_storage_removal()\n return self._to(torch.float8_e5m2fnuz)", + "docstring": "Casts this storage to float8_e5m2fnuz type", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:float8_e5m2fnuz arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__add__", + "source_code": "def __add__(self, other):\n return composite_transform_factory(self, other) if isinstance(other, Transform) else NotImplemented", + "docstring": "Compose two transforms together so that *self* is followed by *other*. 
``.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:__add__ arg:self arg:other arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "broadcast_symbolic_shapes", + "source_code": "def broadcast_symbolic_shapes(a, b):\n output = []\n for x, y in itertools.zip_longest(reversed(a), reversed(b), fillvalue=sympy.S.One):\n if V.graph.sizevars.shape_env.evaluate_expr(sympy.Eq(y, 1), size_oblivious=True):\n output.append(x)\n elif V.graph.sizevars.shape_env.evaluate_expr(sympy.Eq(x, 1), size_oblivious=True):\n output.append(y)\n else:\n V.graph.sizevars.guard_equals(x, y)\n if len(sympy.expand(y).free_symbols) < len(sympy.expand(x).free_symbols):\n output.append(y)\n else:\n output.append(x)\n return tuple(reversed(output))", + "docstring": "Broadcasting logic based on symbolic shapes. We give the shapes 0 and 1 concrete values, while all other shapes are symbolic sympy formulas.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\lowering.py", + "ast_data": "FunctionDef name:broadcast_symbolic_shapes arg:a arg:b arguments arg arg Assign For Call Call Call If Call Call Call If Call Call Call Call If Compare Call Call Call Call Call Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "_version_and_breakpoints", + "source_code": "def _version_and_breakpoints(loca, fontdata):\n v1, v2, numTables = struct.unpack('>3h', fontdata[:6])\n version = (v1, v2)\n tables = {}\n for i in range(numTables):\n tag, _, offset, _ = struct.unpack('>4sIII', fontdata[12 + i * 16:12 + (i + 1) * 16])\n tables[tag.decode('ascii')] = offset\n if loca is not None:\n glyf_breakpoints = {tables['glyf'] + offset for offset in loca.locations[:-1]}\n else:\n glyf_breakpoints = set()\n breakpoints = sorted({*tables.values(), *glyf_breakpoints, len(fontdata)})\n return (version, breakpoints)", + "docstring": "Read the version number of the font and determine sfnts breakpoints. When a TrueType font file is written as a Type 42 font, it has to be broken into substrings of at most 65535 bytes. These substrings must begin at font table boundaries or glyph boundaries in the glyf table. This function determines all possible breakpoints and it is the caller's responsibility to do the splitting. Helper function for _font_to_ps_type42. Parameters ---------- loca : fontTools.ttLib._l_o_c_a.table__l_o_c_a or None The loca table of the font if available fontdata : bytes The raw data of the font Returns ------- version : tuple[int, int] A 2-tuple of the major version number and minor version number. breakpoints : list[int] The breakpoints is a sorted list of offsets into fontdata; if loca is not available, just the table boundaries.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py", + "ast_data": "FunctionDef name:_version_and_breakpoints arg:loca arg:fontdata arguments arg arg Assign Call Assign Assign For Call Assign Call Assign Call If Compare Assign Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "__init__", + "source_code": "def __init__(self, x, nn=None, index=None):\n self.x = x\n self.hash = hash(self.x)\n if nn is not None:\n self.nn = set(nn)\n else:\n self.nn = set()\n self.index = index", + "docstring": "Initiation of a vertex object. Parameters ---------- x : tuple or vector The geometric location (domain). nn : list, optional Nearest neighbour list. 
index : int, optional Index of vertex.", + "type": "method", + "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:x arg:nn arg:index arguments arg arg arg arg Assign Assign Call If Compare Assign Call Assign Call Assign" + }, + { + "library": "numpy", + "name": "_merge", + "source_code": "def _merge(old, new):\n if new in old:\n return old\n if not old:\n return new\n return ';'.join([old, new])", + "docstring": "Concatenate two environment paths avoiding repeats. Here is the environment string before the base class initialize function is called and is the string after the call. The new string will be a fixed string if it is not obtained from the current environment, or the same as the old string if obtained from the same environment. The aim here is not to append the new string if it is already contained in the old string so as to limit the growth of the environment string. Parameters ---------- old : string Previous environment string. new : string New environment string. Returns ------- ret : string Updated environment string.", + "type": "function", + "file_path": "numpy\\numpy\\distutils\\msvccompiler.py", + "ast_data": "FunctionDef name:_merge arg:old arg:new arguments arg arg If Compare Return return:yes If Return return:yes Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_get_tick", + "source_code": "def _get_tick(self, major):\n if self._tick_class is None:\n raise NotImplementedError(f'The Axis subclass {self.__class__.__name__} must define _tick_class or reimplement _get_tick()')\n tick_kw = self._major_tick_kw if major else self._minor_tick_kw\n return self._tick_class(self.axes, 0, major=major, **tick_kw)", + "docstring": "Return the default tick instance.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:_get_tick arg:self arg:major arguments arg arg If Compare Raise Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "aggregate_single_gradient_using_copy", + "source_code": "def aggregate_single_gradient_using_copy(grad_and_vars, use_mean, check_inf_nan):\n grads = [g for g, _ in grad_and_vars]\n grad = math_ops.add_n(grads)\n if use_mean and len(grads) > 1:\n grad = array_ops.multiply(grad, 1.0 / len(grads))\n v = grad_and_vars[0][1]\n if check_inf_nan:\n has_nan_or_inf = array_ops.logical_not(array_ops.reduce_all(array_ops.is_finite(grads)))\n return ((grad, v), has_nan_or_inf)\n else:\n return ((grad, v), None)", + "docstring": "Calculate the average gradient for a shared variable across all replicas. Note that this function provides a synchronization point across all replicas. Args: grad_and_vars: A list or tuple of (gradient, variable) tuples. Each (gradient, variable) pair within the outer list represents the gradient of the variable calculated for a single replica, and the number of pairs equals the number of replicas. use_mean: if True, mean is taken, else sum of gradients is taken. check_inf_nan: check grads for nans and infs. Returns: The tuple ([(average_gradient, variable),], has_nan_or_inf) where the gradient has been averaged across all replicas. The variable is chosen from the first replica. 
The has_nan_or_inf indicates the grads has nan or inf.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py", + "ast_data": "FunctionDef name:aggregate_single_gradient_using_copy arg:grad_and_vars arg:use_mean arg:check_inf_nan arguments arg arg arg Assign Assign Call If BoolOp Compare Call Assign Call Call Assign If Assign Call Call Call Return return:yes Return return:yes" + }, + { + "library": "authlib", + "name": "JWEHeader", + "source_code": "class JWEHeader(dict):\n\n def __init__(self, protected, unprotected, header):\n obj = {}\n if protected:\n obj.update(protected)\n if unprotected:\n obj.update(unprotected)\n if header:\n obj.update(header)\n super().__init__(obj)\n self.protected = protected if protected else {}\n self.unprotected = unprotected if unprotected else {}\n self.header = header if header else {}", + "docstring": "Header object for JWE. Combines protected header, shared unprotected header and specific recipient's unprotected header together.", + "type": "class", + "file_path": "authlib\\authlib\\jose\\rfc7516\\models.py", + "ast_data": "ClassDef name:JWEHeader FunctionDef name:__init__ arg:self arg:protected arg:unprotected arg:header arguments arg arg arg arg Assign If Call If Call If Call Call Call Assign Assign Assign" + }, + { + "library": "pytorch", + "name": "dedupe_symints", + "source_code": "def dedupe_symints(graph: torch.fx.Graph):\n sym_dict = _SymHashingDict()\n resolvable_from_input_symints = OrderedSet[Any]()\n for node in graph.nodes:\n val = node.meta.get('val', None)\n if val is None or not isinstance(val, py_sym_types):\n continue\n if node.op == 'placeholder':\n resolvable_from_input_symints.add(node)\n sym_dict[val] = node\n elif (existing_node := sym_dict.get(val)):\n node.replace_all_uses_with(existing_node)\n graph.erase_node(node)\n elif all((n in resolvable_from_input_symints for n in node.all_input_nodes)):\n sym_dict[val] = node\n resolvable_from_input_symints.add(node)", + "docstring": "Dedupes sym ints in the graph to nodes are resolvable to symint graph inputs. We only dedupe from graph inputs to avoid adding a potential dependency in the forward from the backward.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\dedupe_symint_uses.py", + "ast_data": "FunctionDef name:dedupe_symints arg:graph arguments arg Assign Call Assign Call For Assign Call If BoolOp Compare Call If Compare Call Assign If Call Call Call If Call Compare Assign Call" + }, + { + "library": "django", + "name": "annotation_select", + "source_code": "@property\ndef annotation_select(self):\n if self._annotation_select_cache is not None:\n return self._annotation_select_cache\n elif not self.annotations:\n return {}\n elif self.annotation_select_mask is not None:\n self._annotation_select_cache = {k: v for k, v in self.annotations.items() if k in self.annotation_select_mask}\n return self._annotation_select_cache\n else:\n return self.annotations", + "docstring": "Return the dictionary of aggregate columns that are not masked and should be used in the SELECT clause. 
Cache this result for performance.", + "type": "method", + "file_path": "django\\django\\db\\models\\sql\\query.py", + "ast_data": "FunctionDef name:annotation_select arg:self arguments arg If Compare Return return:yes If Return return:no If Compare Assign Call Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "load_variable", + "source_code": "@tf_export('train.load_variable')\ndef load_variable(ckpt_dir_or_file, name):\n if name.endswith(':0'):\n name = name[:-2]\n reader = load_checkpoint(ckpt_dir_or_file)\n return reader.get_tensor(name)", + "docstring": "Returns the tensor value of the given variable in the checkpoint. When the variable name is unknown, you can use to inspect all the variable names. Example usage: Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint. name: Name of the variable to return. Returns: A numpy with a copy of the value of this variable.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\checkpoint_utils.py", + "ast_data": "FunctionDef name:load_variable arg:ckpt_dir_or_file arg:name arguments arg arg If Call Assign Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "resource_tracker_scope", + "source_code": "@tf_contextlib.contextmanager\ndef resource_tracker_scope(resource_tracker):\n global _RESOURCE_TRACKER_STACK\n old = list(_RESOURCE_TRACKER_STACK)\n _RESOURCE_TRACKER_STACK.append(resource_tracker)\n try:\n yield\n finally:\n _RESOURCE_TRACKER_STACK = old", + "docstring": "A context to manage resource trackers. Use this in order to collect up all resources created within a block of code. Example usage: ```python resource_tracker = ResourceTracker() with resource_tracker_scope(resource_tracker): resource = TrackableResource() assert resource_tracker.resources == [resource] Args: resource_tracker: The passed in ResourceTracker object Yields: A scope in which the resource_tracker is active.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\trackable\\resource.py", + "ast_data": "FunctionDef name:resource_tracker_scope arg:resource_tracker arguments arg Assign Call Call Try Assign" + }, + { + "library": "scikit-learn", + "name": "_raise_for_params", + "source_code": "def _raise_for_params(params, owner, method, allow=None):\n caller = f'{owner.__class__.__name__}.{method}' if method else owner.__class__.__name__\n allow = allow if allow is not None else {}\n if not _routing_enabled() and params.keys() - allow:\n raise ValueError(f'Passing extra keyword arguments to {caller} is only supported if enable_metadata_routing=True, which you can set using `sklearn.set_config`. See the User Guide for more details. Extra parameters passed are: {set(params)}')", + "docstring": "Raise an error if metadata routing is not enabled and params are passed. .. versionadded:: 1.4 Parameters ---------- params : dict The metadata passed to a method. owner : object The object to which the method belongs. method : str The name of the method, e.g. \"fit\". allow : list of str, default=None A list of parameters which are allowed to be passed even if metadata routing is not enabled. 
Raises ------ ValueError If metadata routing is not enabled and params are passed.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py", + "ast_data": "FunctionDef name:_raise_for_params arg:params arg:owner arg:method arg:allow arguments arg arg arg arg Assign Assign Compare If BoolOp Call Call Raise Call Call" + }, + { + "library": "tensorflow", + "name": "ProfileAnalysisServicer", + "source_code": "class ProfileAnalysisServicer(object):\n\n def NewSession(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def EnumSessions(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetSessionToolData(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", + "docstring": "////////////////////////////////////////////////////////////////////////////// ProfileAnalysis service provide entry point for profiling TPU and for serving profiled data to Tensorboard through GRPC //////////////////////////////////////////////////////////////////////////////", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\profiler\\profiler_analysis_pb2_grpc.py", + "ast_data": "ClassDef name:ProfileAnalysisServicer FunctionDef name:NewSession arg:self arg:request arg:context arguments arg arg arg Call Call Raise Call FunctionDef name:EnumSessions arg:self arg:request arg:context arguments arg arg arg Call Call Raise Call FunctionDef name:GetSessionToolData arg:self arg:request arg:context arguments arg arg arg Call Call Raise Call" + }, + { + "library": "kornia", + "name": "SSIMLoss", + "source_code": "class SSIMLoss(nn.Module):\n\n def __init__(self, window_size: int, max_val: float=1.0, eps: float=1e-12, reduction: str='mean', padding: str='same') -> None:\n super().__init__()\n self.window_size: int = window_size\n self.max_val: float = max_val\n self.eps: float = eps\n self.reduction: str = reduction\n self.padding: str = padding\n\n def forward(self, img1: torch.Tensor, img2: torch.Tensor) -> torch.Tensor:\n return ssim_loss(img1, img2, self.window_size, self.max_val, self.eps, self.reduction, self.padding)", + "docstring": "Create a criterion that computes a loss based on the SSIM measurement. The loss, or the Structural dissimilarity (DSSIM) is described as: .. math:: \\text{loss}(x, y) = \\frac{1 - \\text{SSIM}(x, y)}{2} See :meth: for details about SSIM. Args: window_size: the size of the gaussian kernel to smooth the images. max_val: the dynamic range of the images. eps: Small value for numerically stability when dividing. reduction : Specifies the reduction to apply to the output: ``. Whether to only use the \"valid\" convolution area to compute SSIM to match the MATLAB implementation of original SSIM paper. Returns: The loss based on the ssim index. 
Examples: >>> input1 = torch.rand(1, 4, 5, 5) >>> input2 = torch.rand(1, 4, 5, 5) >>> criterion = SSIMLoss(5) >>> loss = criterion(input1, input2)", + "type": "class", + "file_path": "kornia\\kornia\\losses\\ssim.py", + "ast_data": "ClassDef name:SSIMLoss FunctionDef name:__init__ arg:self arg:window_size arg:max_val arg:eps arg:reduction arg:padding arguments arg arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:img1 arg:img2 arguments arg arg arg Return return:yes Call" + }, + { + "library": "authlib", + "name": "sign", + "source_code": "def sign(self, msg, key):\n raise NotImplementedError", + "docstring": "Sign the text msg with a private/sign key. :param msg: message bytes to be signed :param key: private key to sign the message :return: bytes", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7515\\models.py", + "ast_data": "FunctionDef name:sign arg:self arg:msg arg:key arguments arg arg arg Raise" + }, + { + "library": "tensorflow", + "name": "_build_shuffle_gather", + "source_code": "def _build_shuffle_gather(input_tensors, gather_devices, red_op, un_op=None):\n num_source_devices = len(input_tensors)\n num_gather_devices = len(gather_devices)\n shape = input_tensors[0].shape\n if len(shape) != 1:\n raise ValueError('input_tensors must be 1D')\n shards_by_source = []\n for d in range(0, num_source_devices):\n with ops.colocate_with(input_tensors[d]):\n shards_by_source.append(_ragged_split(input_tensors[d], num_gather_devices))\n reduced_shards = []\n for d in range(0, num_gather_devices):\n with ops.device(gather_devices[d]):\n values = [s[d] for s in shards_by_source]\n red_shard = red_op(values)\n if un_op:\n red_shard = un_op(red_shard)\n reduced_shards.append(red_shard)\n return reduced_shards", + "docstring": "Construct the gather (concentrate and reduce) phase of shuffle all-reduce. Args: input_tensors: list of values to be reduced. gather_devices: list of names of devices on which reduction shards should be placed. red_op: the binary reduction Op un_op: optional elementwise unary Op to be applied to fully-reduced values. Returns: list of which are the fully reduced shards. Raises: ValueError: inputs not well-formed.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", + "ast_data": "FunctionDef name:_build_shuffle_gather arg:input_tensors arg:gather_devices arg:red_op arg:un_op arguments arg arg arg arg Assign Call Assign Call Assign If Compare Call Raise Call Assign For Call With Call Call Call Assign For Call With Call Assign Assign Call If Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "create_dir", + "source_code": "@tf_export(v1=['gfile.MkDir'])\ndef create_dir(dirname):\n create_dir_v2(dirname)", + "docstring": "Creates a directory with the name . Args: dirname: string, name of the directory to be created Notes: The parent directories need to exist. Use instead if there is the possibility that the parent dirs don't exist. 
Raises: errors.OpError: If the operation fails.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", + "ast_data": "FunctionDef name:create_dir arg:dirname arguments arg Call Call" + }, + { + "library": "django", + "name": "loads", + "source_code": "def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None, fallback_keys=None):\n return TimestampSigner(key=key, salt=salt, fallback_keys=fallback_keys).unsign_object(s, serializer=serializer, max_age=max_age)", + "docstring": "Reverse of dumps(), raise BadSignature if signature fails. The serializer is expected to accept a bytestring.", + "type": "function", + "file_path": "django\\django\\core\\signing.py", + "ast_data": "FunctionDef name:loads arg:s arg:key arg:salt arg:serializer arg:max_age arg:fallback_keys arguments arg arg arg arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_create_categorical_column_weighted_sum", + "source_code": "def _create_categorical_column_weighted_sum(column, builder, units, sparse_combiner, weight_collections, trainable, weight_var=None):\n sparse_tensors = column._get_sparse_tensors(builder, weight_collections=weight_collections, trainable=trainable)\n id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [array_ops.shape(sparse_tensors.id_tensor)[0], -1])\n weight_tensor = sparse_tensors.weight_tensor\n if weight_tensor is not None:\n weight_tensor = sparse_ops.sparse_reshape(weight_tensor, [array_ops.shape(weight_tensor)[0], -1])\n if weight_var is not None:\n weight = weight_var\n else:\n weight = variable_scope.get_variable(name='weights', shape=(column._num_buckets, units), initializer=init_ops.zeros_initializer(), trainable=trainable, collections=weight_collections)\n return embedding_ops.safe_embedding_lookup_sparse(weight, id_tensor, sparse_weights=weight_tensor, combiner=sparse_combiner, name='weighted_sum')", + "docstring": "Create a weighted sum of a categorical column for linear_model. Note to maintainer: As implementation details, the weighted sum is implemented via embedding_lookup_sparse toward efficiency. Mathematically, they are the same. To be specific, conceptually, categorical column can be treated as multi-hot vector. Say: The weighted sum is in this case, which is same as . Another example is The weighted sum is in this case, which is same as . For both cases, we can implement weighted sum via embedding_lookup with sparse_combiner = \"sum\".", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_create_categorical_column_weighted_sum arg:column arg:builder arg:units arg:sparse_combiner arg:weight_collections arg:trainable arg:weight_var arguments arg arg arg arg arg arg arg Assign Call Assign Call Call Assign If Compare Assign Call Call If Compare Assign Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "in_top_k", + "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef in_top_k(predictions, targets, k):\n return nn.in_top_k(predictions, targets, k)", + "docstring": "Returns whether the are in the top . Args: predictions: A tensor of shape and type . targets: A 1D tensor of length and type or . k: An , number of top elements to consider. Returns: A 1D tensor of length and type . 
is if is within top- values of .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:in_top_k arg:predictions arg:targets arg:k arguments arg arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "estimate", + "source_code": "def estimate(self, f, a, b, args=()):\n nodes, weights = self.nodes_and_weights\n if self.xp is None:\n self.xp = array_namespace(nodes)\n return _apply_fixed_rule(f, a, b, nodes, weights, args, self.xp)", + "docstring": "Calculate estimate of integral of in rectangular region described by corners and as `[-1, 1]^n[a, b]^nffxestimateffest`.", + "type": "method", + "file_path": "scipy\\scipy\\integrate\\_rules\\_base.py", + "ast_data": "FunctionDef name:estimate arg:self arg:f arg:a arg:b arg:args arguments arg arg arg arg arg Assign If Compare Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "ElasticDistributedSampler", + "source_code": "class ElasticDistributedSampler(DistributedSampler):\n\n def __init__(self, dataset, num_replicas=None, rank=None, start_index=0):\n super().__init__(dataset=dataset, num_replicas=num_replicas, rank=rank)\n if start_index >= len(dataset):\n raise ValueError(f'Start index {start_index} should be less than dataset size {len(dataset)}')\n self.start_index = start_index\n self.num_samples = int(math.ceil(float(len(self.dataset) - self.start_index) / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n\n def __iter__(self):\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = torch.randperm(len(self.dataset) - self.start_index, generator=g).add(self.start_index).tolist()\n indices += indices[:self.total_size - len(indices)]\n assert len(indices) == self.total_size\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n return iter(indices)\n\n def __len__(self):\n return self.num_samples", + "docstring": "Sampler that restricts data loading to a subset of the dataset for elastic training. It is especially useful in conjunction with :class:. In such case, each process can pass a DistributedSampler instance as a DataLoader sampler, and load a subset of the original dataset that is exclusive to it. .. note:: Dataset is assumed to be of constant size. Args: dataset: Dataset used for sampling. num_replicas (optional): Number of processes participating in distributed training. rank (optional): Rank of the current process within num_replicas. 
start_index (optional): Which index of the dataset to start sampling from", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\elastic\\utils\\data\\elastic_distributed_sampler.py", + "ast_data": "ClassDef name:ElasticDistributedSampler FunctionDef name:__init__ arg:self arg:dataset arg:num_replicas arg:rank arg:start_index arguments arg arg arg arg arg Call Call If Compare Call Raise Call Call Assign Assign Call Call Call Call Assign FunctionDef name:__iter__ arg:self arguments arg Assign Call Call Assign Call Call Call Call Call Compare Call Assign Compare Call Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "ensure_dtype_objs", + "source_code": "def ensure_dtype_objs(dtype: DtypeArg | dict[Hashable, DtypeArg] | None) -> DtypeObj | dict[Hashable, DtypeObj] | None:\n if isinstance(dtype, defaultdict):\n default_dtype = pandas_dtype(dtype.default_factory())\n dtype_converted: defaultdict = defaultdict(lambda: default_dtype)\n for key in dtype.keys():\n dtype_converted[key] = pandas_dtype(dtype[key])\n return dtype_converted\n elif isinstance(dtype, dict):\n return {k: pandas_dtype(dtype[k]) for k in dtype}\n elif dtype is not None:\n return pandas_dtype(dtype)\n return dtype", + "docstring": "Ensure we have either None, a dtype object, or a dictionary mapping to dtype objects.", + "type": "function", + "file_path": "pandas\\pandas\\io\\parsers\\c_parser_wrapper.py", + "ast_data": "FunctionDef name:ensure_dtype_objs arg:dtype arguments arg If Call Assign Call Call Call arguments For Call Assign Call Return return:yes If Call Return return:yes Call If Compare Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "distributions_and_v1_optimizers", + "source_code": "def distributions_and_v1_optimizers():\n return combinations.combine(distribution=[strategy_combinations_base.one_device_strategy, strategy_combinations_base.mirrored_strategy_with_gpu_and_cpu, strategy_combinations_base.mirrored_strategy_with_two_gpus, strategy_combinations_base.mirrored_strategy_with_two_gpus_no_merge_call], optimizer_fn=optimizers_v1)", + "docstring": "A common set of combination with DistributionStrategies and Optimizers.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\optimizer_combinations.py", + "ast_data": "FunctionDef name:distributions_and_v1_optimizers arguments Return return:yes Call" + }, + { + "library": "scipy", + "name": "distance_matrix", + "source_code": "def distance_matrix(x, y, p=2, threshold=1000000):\n x = np.asarray(x)\n m, k = x.shape\n y = np.asarray(y)\n n, kk = y.shape\n if k != kk:\n raise ValueError(f'x contains {k}-dimensional vectors but y contains {kk}-dimensional vectors')\n if m * n * k <= threshold:\n return minkowski_distance(x[:, np.newaxis, :], y[np.newaxis, :, :], p)\n else:\n result = np.empty((m, n), dtype=float)\n if m < n:\n for i in range(m):\n result[i, :] = minkowski_distance(x[i], y, p)\n else:\n for j in range(n):\n result[:, j] = minkowski_distance(x, y[j], p)\n return result", + "docstring": "Compute the distance matrix. Returns the matrix of all pair-wise distances. Parameters ---------- x : (M, K) array_like Matrix of M vectors in K dimensions. y : (N, K) array_like Matrix of N vectors in K dimensions. p : float, 1 , algorithm uses a Python loop instead of large temporary arrays. Returns ------- result : (M, N) ndarray Matrix containing the distance from every vector in to every vector in . 
Examples -------- >>> from scipy.spatial import distance_matrix >>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]]) array([[ 1. , 1.41421356], [ 1.41421356, 1. ]])", + "type": "function", + "file_path": "scipy\\scipy\\spatial\\_kdtree.py", + "ast_data": "FunctionDef name:distance_matrix arg:x arg:y arg:p arg:threshold arguments arg arg arg arg Assign Call Assign Assign Call Assign If Compare Raise Call If Compare Return return:yes Call Assign Call If Compare For Call Assign Call For Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "get_indexer", + "source_code": "def get_indexer(current_indexer: Index, other_indexer: Index) -> Index:\n if method == 'nsmallest':\n return current_indexer.append(other_indexer)\n else:\n return other_indexer.append(current_indexer)", + "docstring": "Helper function to concat and depending on", + "type": "method", + "file_path": "pandas\\pandas\\core\\methods\\selectn.py", + "ast_data": "FunctionDef name:get_indexer arg:current_indexer arg:other_indexer arguments arg arg If Compare Return return:yes Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, format_dict, formatter=None):\n super().__init__()\n self._format_dict = format_dict\n self._fallback_formatter = formatter", + "docstring": "format_dict : dictionary for format strings to be used. formatter : fall-back formatter", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_finder.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:format_dict arg:formatter arguments arg arg arg Call Call Assign Assign" + }, + { + "library": "kornia", + "name": "Jr", + "source_code": "@staticmethod\ndef Jr(vec: Tensor) -> Tensor:\n return So3.right_jacobian(vec)", + "docstring": "Alias for right jacobian. Args: vec: the input point of shape :math:.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py", + "ast_data": "FunctionDef name:Jr arg:vec arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, session_root, watch_fn=None, thread_name_filter=None):\n self._session_root = session_root\n self._watch_fn = watch_fn\n self._thread_name_filter = thread_name_filter\n self._session_wrapper = None", + "docstring": "Create a local debugger command-line interface (CLI) hook. Args: session_root: See doc of . watch_fn: See doc of . thread_name_filter: Regular-expression white list for threads on which the wrapper session will be active. See doc of for more details.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\hooks.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:session_root arg:watch_fn arg:thread_name_filter arguments arg arg arg arg Assign Assign Assign Assign" + }, + { + "library": "scikit-learn", + "name": "predict_proba", + "source_code": "@available_if(_estimator_has('predict_proba'))\ndef predict_proba(self, X):\n check_is_fitted(self)\n return self.estimator_.predict_proba(self.transform(X))", + "docstring": "Predict class probabilities for X. Parameters ---------- X : {array-like or sparse matrix} of shape (n_samples, n_features) The input samples. 
Internally, it will be converted to `classes_`.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py", + "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Return return:yes Call Call Call Call" + }, + { + "library": "authlib", + "name": "authenticate_client", + "source_code": "def authenticate_client(self, request, methods, endpoint='token'):\n if self._client_auth is None and self.query_client:\n self._client_auth = ClientAuthentication(self.query_client)\n return self._client_auth(request, methods, endpoint)", + "docstring": "Authenticate client via HTTP request information with the given methods, such as ``.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py", + "ast_data": "FunctionDef name:authenticate_client arg:self arg:request arg:methods arg:endpoint arguments arg arg arg arg If BoolOp Compare Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "formset_factory", + "source_code": "def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False, can_delete=False, max_num=None, validate_max=False, min_num=None, validate_min=False, absolute_max=None, can_delete_extra=True, renderer=None):\n if min_num is None:\n min_num = DEFAULT_MIN_NUM\n if max_num is None:\n max_num = DEFAULT_MAX_NUM\n if absolute_max is None:\n absolute_max = max_num + DEFAULT_MAX_NUM\n if max_num > absolute_max:\n raise ValueError(\"'absolute_max' must be greater or equal to 'max_num'.\")\n attrs = {'form': form, 'extra': extra, 'can_order': can_order, 'can_delete': can_delete, 'can_delete_extra': can_delete_extra, 'min_num': min_num, 'max_num': max_num, 'absolute_max': absolute_max, 'validate_min': validate_min, 'validate_max': validate_max, 'renderer': renderer}\n form_name = form.__name__\n if form_name.endswith('Form'):\n formset_name = form_name + 'Set'\n else:\n formset_name = form_name + 'FormSet'\n return type(formset_name, (formset,), attrs)", + "docstring": "Return a FormSet for the given form class.", + "type": "function", + "file_path": "django\\django\\forms\\formsets.py", + "ast_data": "FunctionDef name:formset_factory arg:form arg:formset arg:extra arg:can_order arg:can_delete arg:max_num arg:validate_max arg:min_num arg:validate_min arg:absolute_max arg:can_delete_extra arg:renderer arguments arg arg arg arg arg arg arg arg arg arg arg arg If Compare Assign If Compare Assign If Compare Assign If Compare Raise Call Assign Assign If Call Assign Assign Return return:yes Call" + }, + { + "library": "seaborn", + "name": "tick", + "source_code": "def tick(self, locator: Locator | None=None, *, at: Sequence[float] | None=None, upto: int | None=None, count: int | None=None, every: float | None=None, between: tuple[float, float] | None=None, minor: int | None=None) -> Continuous:\n if locator is not None and (not isinstance(locator, Locator)):\n raise TypeError(f'Tick locator must be an instance of {Locator!r}, not {type(locator)!r}.')\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if log_base or symlog_thresh:\n if count is not None and between is None:\n raise RuntimeError('`count` requires `between` with log transform.')\n if every is not None:\n raise RuntimeError('`every` not supported with log transform.')\n new = copy(self)\n new._tick_params = {'locator': locator, 'at': at, 'upto': upto, 'count': count, 'every': every, 'between': between, 'minor': minor}\n return new", + "docstring": "Configure the selection of ticks for the 
scale's axis or legend. Parameters ---------- locator : :class: subclass Pre-configured matplotlib locator; other parameters will not be used. at : sequence of floats Place ticks at these specific locations (in data units). upto : int Choose \"nice\" locations for ticks, but do not exceed this number. count : int Choose exactly this number of ticks, bounded by or axis limits. every : float Choose locations at this interval of separation (in data units). between : pair of floats Bound upper / lower ticks when using or . minor : int Number of unlabeled ticks to draw between labeled \"major\" ticks. Returns ------- scale Copy of self with new tick configuration.", + "type": "method", + "file_path": "seaborn\\seaborn\\_core\\scales.py", + "ast_data": "FunctionDef name:tick arg:self arg:locator arguments arg arg arg arg arg arg arg arg If BoolOp Compare Call Raise Call Call Assign Call If BoolOp If BoolOp Compare Compare Raise Call If Compare Raise Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "isframe", + "source_code": "def isframe(object):\n return _inspect.isframe(tf_decorator.unwrap(object)[1])", + "docstring": "TFDecorator-aware replacement for inspect.ismodule.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", + "ast_data": "FunctionDef name:isframe arg:object arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "set_max_tuning_duration", + "source_code": "def set_max_tuning_duration(duration: int) -> None:\n torch._C._cuda_tunableop_set_max_tuning_duration(duration)", + "docstring": "Set max time in milliseconds to spend tuning a given solution. If both max tuning duration and iterations are set, the smaller of the two will be honored. At minimum 1 tuning iteration will always be run.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\tunable.py", + "ast_data": "FunctionDef name:set_max_tuning_duration arg:duration arguments arg Call" + }, + { + "library": "matplotlib", + "name": "set_linestyle", + "source_code": "def set_linestyle(self, ls):\n if isinstance(ls, str):\n if ls in [' ', '', 'none']:\n ls = 'None'\n _api.check_in_list([*self._lineStyles, *ls_mapper_r], ls=ls)\n if ls not in self._lineStyles:\n ls = ls_mapper_r[ls]\n self._linestyle = ls\n else:\n self._linestyle = '--'\n self._unscaled_dash_pattern = _get_dash_pattern(ls)\n self._dash_pattern = _scale_dashes(*self._unscaled_dash_pattern, self._linewidth)\n self.stale = True", + "docstring": "Set the linestyle of the line. 
Parameters ---------- ls : {'-', '--', '-.', ':', '', (offset, on-off-seq), ...} Possible values: - A string: ======================================================= ================ linestyle description ======================================================= ================ `set_dashes/gallery/lines_bars_and_markers/linestyles`.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\lines.py", + "ast_data": "FunctionDef name:set_linestyle arg:self arg:ls arguments arg arg If Call If Compare Assign Call If Compare Assign Assign Assign Assign Call Assign Call Assign" + }, + { + "library": "pandas", + "name": "is_view", + "source_code": "@property\ndef is_view(self) -> bool:\n if len(self.blocks) == 1:\n return self.blocks[0].is_view\n return False", + "docstring": "return a boolean if we are a single block and are a view", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\managers.py", + "ast_data": "FunctionDef name:is_view arg:self arguments arg If Compare Call Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "_visit_query_reference_node", + "source_code": "def _visit_query_reference_node(self, node):\n query = node.to_query_string()\n for refnode in node.findall(nodes.reference):\n uri = urlsplit(refnode['refuri'])._replace(query=query)\n refnode['refuri'] = urlunsplit(uri)\n self.visit_literal(node)", + "docstring": "Resolve *node* into query strings on its `~docutils.nodes.literal`.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\sphinxext\\roles.py", + "ast_data": "FunctionDef name:_visit_query_reference_node arg:self arg:node arguments arg arg Assign Call For Call Assign Call Call Assign Call Call" + }, + { + "library": "pandas", + "name": "css_calc", + "source_code": "def css_calc(x, left: float, right: float, align: str, color: str | list | tuple):\n if pd.isna(x):\n return base_css\n if isinstance(color, (list, tuple)):\n color = color[0] if x < 0 else color[1]\n assert isinstance(color, str)\n x = left if x < left else x\n x = right if x > right else x\n start: float = 0\n end: float = 1\n if align == 'left':\n end = (x - left) / (right - left)\n elif align == 'right':\n start = (x - left) / (right - left)\n else:\n z_frac: float = 0.5\n if align == 'zero':\n limit: float = max(abs(left), abs(right))\n left, right = (-limit, limit)\n elif align == 'mid':\n mid: float = (left + right) / 2\n z_frac = -mid / (right - left) + 0.5 if mid < 0 else -left / (right - left)\n if x < 0:\n start, end = ((x - left) / (right - left), z_frac)\n else:\n start, end = (z_frac, (x - left) / (right - left))\n ret = css_bar(start * width, end * width, color)\n if height < 1 and 'background: linear-gradient(' in ret:\n return ret + f' no-repeat center; background-size: 100% {height * 100:.1f}%;'\n else:\n return ret", + "docstring": "Return the correct CSS for bar placement based on calculated values. Parameters ---------- x : float Value which determines the bar placement. left : float Value marking the left side of calculation, usually minimum of data. 
right : float Value marking the right side of the calculation, usually maximum of data (left = 0`` from outer scope.", + "type": "function", + "file_path": "pandas\\pandas\\io\\formats\\style.py", + "ast_data": "FunctionDef name:css_calc arg:x arg:left arg:right arg:align arg:color arguments arg arg arg arg arg If Call Return return:yes If Call Assign Compare Call Assign Compare Assign Compare If Compare Assign If Compare Assign If Compare Call Call Call Assign If Compare Assign Compare If Compare Assign Assign Assign Call If BoolOp Compare Compare Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_run_eager_benchmark", + "source_code": "def _run_eager_benchmark(self, iterable, iters, warmup):\n deltas = []\n if not context.executing_eagerly():\n raise RuntimeError('Eager mode benchmarking is not supported in graph mode.')\n for _ in range(iters):\n if warmup:\n iterator = iter(iterable)\n next(iterator)\n iterator = iter(iterable)\n start = time.time()\n next(iterator)\n end = time.time()\n deltas.append(end - start)\n return np.median(deltas)", + "docstring": "Benchmark the iterable in eager mode. Runs the iterable times. In each iteration, the benchmark measures the time it takes to go execute the iterable. Args: iterable: The tf op or tf.data Dataset to benchmark. iters: Number of times to repeat the timing. warmup: If true, warms up the session caches by running an untimed run. Returns: A float, representing the median time (with respect to ) it takes for the iterable to be executed num of times. Raises: RuntimeError: When executed in graph mode.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\benchmark_base.py", + "ast_data": "FunctionDef name:_run_eager_benchmark arg:self arg:iterable arg:iters arg:warmup arguments arg arg arg arg Assign If Call Raise Call For Call If Assign Call Call Assign Call Assign Call Call Assign Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "freeze", + "source_code": "def freeze(self, *args, **kwds):\n if isinstance(self, rv_continuous):\n return rv_continuous_frozen(self, *args, **kwds)\n else:\n return rv_discrete_frozen(self, *args, **kwds)", + "docstring": "Freeze the distribution for the given arguments. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution. Should include all the non-optional arguments, may include ``. 
Returns ------- rv_frozen : rv_frozen instance The frozen distribution.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:freeze arg:self arguments arg arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_get_repr_footer", + "source_code": "def _get_repr_footer(self) -> str:\n category_strs = self._repr_categories()\n dtype = str(self.categories.dtype)\n levheader = f'Categories ({len(self.categories)}, {dtype}): '\n width, _ = get_terminal_size()\n max_width = get_option('display.width') or width\n if console.in_ipython_frontend():\n max_width = 0\n levstring = ''\n start = True\n cur_col_len = len(levheader)\n sep_len, sep = (3, ' < ') if self.ordered else (2, ', ')\n linesep = f'{sep.rstrip()}\\n'\n for val in category_strs:\n if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:\n levstring += linesep + ' ' * (len(levheader) + 1)\n cur_col_len = len(levheader) + 1\n elif not start:\n levstring += sep\n cur_col_len += len(val)\n levstring += val\n start = False\n return f'{levheader}[{levstring.replace(' < ... < ', ' ... ')}]'", + "docstring": "Returns a string representation of the footer.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:_get_repr_footer arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Assign BoolOp Call If Call Assign Assign Assign Assign Call Assign Assign Call For If BoolOp Compare Compare Call Call Assign Call If Call Assign Return return:yes Call" + }, + { + "library": "kornia", + "name": "sepia_from_rgb", + "source_code": "def sepia_from_rgb(input: Tensor, rescale: bool=True, eps: float=1e-06) -> Tensor:\n if len(input.shape) < 3 or input.shape[-3] != 3:\n raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {input.shape}')\n r = input[..., 0, :, :]\n g = input[..., 1, :, :]\n b = input[..., 2, :, :]\n r_out = 0.393 * r + 0.769 * g + 0.189 * b\n g_out = 0.349 * r + 0.686 * g + 0.168 * b\n b_out = 0.272 * r + 0.534 * g + 0.131 * b\n sepia_out = torch.stack([r_out, g_out, b_out], dim=-3)\n if rescale:\n max_values = sepia_out.amax(dim=-1).amax(dim=-1)\n sepia_out = sepia_out / (max_values[..., None, None] + eps)\n return sepia_out", + "docstring": "Apply to a tensor the sepia filter. Args: input: the input tensor with shape of :math:. rescale: If True, the output tensor will be rescaled (max values be 1. or 255). eps: scalar to enforce numerical stability. Returns: Tensor: The sepia tensor of same size and numbers of channels as the input with shape :math:. 
Example: >>> input = torch.ones(3, 1, 1) >>> sepia_from_rgb(input, rescale=False) tensor([[[1.3510]], [[1.2030]], [[0.9370]]])", + "type": "function", + "file_path": "kornia\\kornia\\color\\sepia.py", + "ast_data": "FunctionDef name:sepia_from_rgb arg:input arg:rescale arg:eps arguments arg arg arg If BoolOp Compare Call Compare Raise Call Assign Assign Assign Assign Assign Assign Assign Call If Assign Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "isclass", + "source_code": "def isclass(object):\n return _inspect.isclass(tf_decorator.unwrap(object)[1])", + "docstring": "TFDecorator-aware replacement for inspect.isclass.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py", + "ast_data": "FunctionDef name:isclass arg:object arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "ScopedTFImportGraphDefOptions", + "source_code": "class ScopedTFImportGraphDefOptions(object):\n __slots__ = ['options']\n\n def __init__(self):\n self.options = c_api.TF_NewImportGraphDefOptions()\n\n def __del__(self):\n if c_api is not None and c_api.TF_DeleteImportGraphDefOptions is not None:\n c_api.TF_DeleteImportGraphDefOptions(self.options)", + "docstring": "Wrapper around TF_ImportGraphDefOptions that handles deletion.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\framework\\c_api_util.py", + "ast_data": "ClassDef name:ScopedTFImportGraphDefOptions Assign FunctionDef name:__init__ arg:self arguments arg Assign Call FunctionDef name:__del__ arg:self arguments arg If BoolOp Compare Compare Call" + }, + { + "library": "tensorflow", + "name": "get_meta_graph_def_from_tags", + "source_code": "def get_meta_graph_def_from_tags(self, tags):\n if tags is None:\n if len(self._saved_model.meta_graphs) != 1:\n tag_sets = [mg.meta_info_def.tags for mg in self._saved_model.meta_graphs]\n raise ValueError(f'Importing a SavedModel with `tf.saved_model.load` requires a `tags=` argument if there is more than one MetaGraph. Got `tags=None`, but there are {len(self._saved_model.meta_graphs)} MetaGraphs in the SavedModel with tag sets: {tag_sets}. 
Pass a `tags=` argument to load this SavedModel.')\n return self._saved_model.meta_graphs[0]\n return super(_EagerSavedModelLoader, self).get_meta_graph_def_from_tags(tags)", + "docstring": "Override to support implicit one-MetaGraph loading with tags=None.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load_v1_in_v2.py", + "ast_data": "FunctionDef name:get_meta_graph_def_from_tags arg:self arg:tags arguments arg arg If Compare If Compare Call Assign Raise Call Call Return return:yes Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "find_partition", + "source_code": "def find_partition(self, id: str) -> GraphInfo | None:\n if id == self.id:\n return self\n current_length = len(self.id)\n if len(id) > current_length:\n if id[current_length] == '0' and self.upper_graph_info is not None:\n return self.upper_graph_info.find_partition(id)\n elif id[current_length] == '1' and self.lower_graph_info is not None:\n return self.lower_graph_info.find_partition(id)\n return None", + "docstring": "Find the object with the given id.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\verification.py", + "ast_data": "FunctionDef name:find_partition arg:self arg:id arguments arg arg If Compare Return return:yes Assign Call If Compare Call If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call Return return:no" + }, + { + "library": "pytorch", + "name": "find_parent_nodes_of_subgraph", + "source_code": "def find_parent_nodes_of_subgraph(self, tag: str) -> NodeSet:\n parent_nodes = set()\n for node in self.module.graph.nodes:\n if node.op in CALLABLE_NODE_OPS and node.tag == tag:\n for arg in node.all_input_nodes:\n if arg.op in CALLABLE_NODE_OPS and arg.tag != tag:\n parent_nodes.add(arg)\n return parent_nodes", + "docstring": "Finds parent nodes of the subgraph. 
Traverse the inputs of nodes in the subgraph, if input doesn't belong to the subgraph and is not a placeholder, we consider it as the parent node of the subgraph.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py", + "ast_data": "FunctionDef name:find_parent_nodes_of_subgraph arg:self arg:tag arguments arg arg Assign Call For If BoolOp Compare Compare For If BoolOp Compare Compare Call Return return:yes" + }, + { + "library": "pandas", + "name": "validate", + "source_code": "def validate(self, other) -> Literal[True] | None:\n if other is None:\n return None\n return True", + "docstring": "validate against an existing storable", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:validate arg:self arg:other arguments arg arg If Compare Return return:no Return return:yes" + }, + { + "library": "pytorch", + "name": "quantize_per_channel", + "source_code": "@impl(quantized_decomposed_lib, 'quantize_per_channel', 'CompositeExplicitAutograd')\ndef quantize_per_channel(input: torch.Tensor, scales: torch.Tensor, zero_points: torch.Tensor, axis: int, quant_min: int, quant_max: int, dtype: torch.dtype) -> torch.Tensor:\n if input.dtype in [torch.float16, torch.bfloat16]:\n input = input.to(torch.float32)\n assert input.dtype == torch.float32, f'Expecting input to have dtype torch.float32, but got dtype: {input.dtype}'\n assert axis < input.dim(), f'Expecting axis to be < {input.dim()}'\n _quant_min_max_bounds_check(quant_min, quant_max, dtype)\n input, permute_axis_list = _permute_to_axis_zero(input, axis)\n new_shape = [1] * input.dim()\n new_shape[0] = scales.shape[0]\n scales = scales.view(new_shape)\n zero_points = zero_points.view(new_shape)\n res = torch.clamp(torch.round(input * (1.0 / scales)) + zero_points, quant_min, quant_max)\n out = res.permute(tuple(permute_axis_list))\n return out.to(dtype)", + "docstring": "Affine per channel quantization for the Tensor using the same quantization parameters for each channel/axis to map from floating point to quantized values Args: input (torch.Tensor): original float32 or bfloat16 Tensor scales (torch.Tensor): a list of scale quantization parameter for affine quantization, one per channel zero_point (torch.Tensor): a list of zero_point quantization parameter for affine quantization, one per channel quant_min (int): minimum quantized value for output Tensor quant_max (int): maximum quantized value for output Tensor dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor Returns: Tensor with requested dtype (e.g. 
torch.uint8), note the quantization parameters are not stored in the Tensor, we are storing them in function arguments instead", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py", + "ast_data": "FunctionDef name:quantize_per_channel arg:input arg:scales arg:zero_points arg:axis arg:quant_min arg:quant_max arg:dtype arguments arg arg arg arg arg arg arg If Compare Assign Call Compare Compare Call Call Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "raise_requested_exception", + "source_code": "def raise_requested_exception(self):\n with self._lock:\n if self._exc_info_to_raise:\n _, ex_instance, _ = self._exc_info_to_raise\n raise ex_instance", + "docstring": "If an exception has been passed to , this raises it.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py", + "ast_data": "FunctionDef name:raise_requested_exception arg:self arguments arg With If Assign Raise" + }, + { + "library": "pytorch", + "name": "_dropout_helper", + "source_code": "def _dropout_helper(self: TensorLikeType, val: float) -> TensorLikeType:\n return refs._uniform_helper(self.shape, low=0.0, high=1.0, dtype=torch.float32, device=self.device) < val", + "docstring": "Helper function for all dropout-type operators. During training, some of the elements of the input tensor are randomly masked. Returns the masked tensor of the boolean values.", + "type": "function", + "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py", + "ast_data": "FunctionDef name:_dropout_helper arg:self arg:val arguments arg arg Return return:yes Compare Call" + }, + { + "library": "tensorflow", + "name": "on_run_end", + "source_code": "def on_run_end(self, request):\n return OnRunEndResponse()", + "docstring": "See doc of BaseDebugWrapperSession.on_run_end.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", + "ast_data": "FunctionDef name:on_run_end arg:self arg:request arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "switch_orientation", + "source_code": "def switch_orientation(self):\n segments = self.get_segments()\n for i, segment in enumerate(segments):\n segments[i] = np.fliplr(segment)\n self.set_segments(segments)\n self._is_horizontal = not self.is_horizontal()\n self.stale = True", + "docstring": "Switch the orientation of the event line, either from vertical to horizontal or vice versus.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\collections.py", + "ast_data": "FunctionDef name:switch_orientation arg:self arguments arg Assign Call For Call Assign Call Call Assign Call Assign" + }, + { + "library": "scikit-learn", + "name": "_BisectingTree", + "source_code": "class _BisectingTree:\n\n def __init__(self, center, indices, score):\n self.center = center\n self.indices = indices\n self.score = score\n self.left = None\n self.right = None\n\n def split(self, labels, centers, scores):\n self.left = _BisectingTree(indices=self.indices[labels == 0], center=centers[0], score=scores[0])\n self.right = _BisectingTree(indices=self.indices[labels == 1], center=centers[1], score=scores[1])\n self.indices = None\n\n def get_cluster_to_bisect(self):\n max_score = None\n for cluster_leaf in self.iter_leaves():\n if max_score is None or cluster_leaf.score > max_score:\n max_score = cluster_leaf.score\n best_cluster_leaf = 
cluster_leaf\n return best_cluster_leaf\n\n def iter_leaves(self):\n if self.left is None:\n yield self\n else:\n yield from self.left.iter_leaves()\n yield from self.right.iter_leaves()", + "docstring": "Tree structure representing the hierarchical clusters of BisectingKMeans.", + "type": "class", + "file_path": "scikit-learn\\sklearn\\cluster\\_bisect_k_means.py", + "ast_data": "ClassDef name:_BisectingTree FunctionDef name:__init__ arg:self arg:center arg:indices arg:score arguments arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:split arg:self arg:labels arg:centers arg:scores arguments arg arg arg arg Assign Call Compare Assign Call Compare Assign FunctionDef name:get_cluster_to_bisect arg:self arguments arg Assign For Call If BoolOp Compare Compare Assign Assign Return return:yes FunctionDef name:iter_leaves arg:self arguments arg If Compare Call Call" + }, + { + "library": "sphinx", + "name": "fix_svg_relative_paths", + "source_code": "def fix_svg_relative_paths(self: HTML5Translator | LaTeXTranslator | TexinfoTranslator, filepath: str | os.PathLike[str]) -> None:\n env = self.builder.env\n tree = ET.parse(filepath)\n root = tree.getroot()\n ns = {'svg': 'http://www.w3.org/2000/svg', 'xlink': 'http://www.w3.org/1999/xlink'}\n href_name = '{http://www.w3.org/1999/xlink}href'\n modified = False\n for element in chain(root.findall('.//svg:image[@xlink:href]', ns), root.findall('.//svg:a[@xlink:href]', ns)):\n scheme, hostname, rel_uri, query, fragment = urlsplit(element.attrib[href_name])\n if hostname:\n continue\n docname = env.path2doc(self.document['source'])\n if docname is None:\n continue\n doc_dir = self.builder.outdir.joinpath(docname).resolve().parent\n old_path = doc_dir / rel_uri\n img_path = doc_dir / self.builder.imgpath\n new_path = os.path.relpath(old_path, start=img_path)\n modified_url = urlunsplit((scheme, hostname, new_path, query, fragment))\n element.set(href_name, modified_url)\n modified = True\n if modified:\n tree.write(filepath)", + "docstring": "Change relative links in generated svg files to be relative to imgpath.", + "type": "function", + "file_path": "sphinx\\sphinx\\ext\\graphviz.py", + "ast_data": "FunctionDef name:fix_svg_relative_paths arg:self arg:filepath arguments arg arg Assign Assign Call Assign Call Assign Assign Assign For Call Call Call Assign Call If Assign Call If Compare Assign Call Call Assign Assign Assign Call Assign Call Call Assign If Call" + }, + { + "library": "pytorch", + "name": "generate_numeric_debug_handle", + "source_code": "def generate_numeric_debug_handle(ep: ExportedProgram) -> None:\n if not isinstance(ep, ExportedProgram):\n raise ValueError(f'Expected ep to be ExportedProgram, got {type(ExportedProgram)}')\n unique_id = 0\n\n def _find_max_id(node: torch.fx.Node) -> None:\n nonlocal unique_id\n unique_id = max(unique_id, node.meta.get(CUSTOM_KEY, {}).get(NUMERIC_DEBUG_HANDLE_KEY, 0))\n\n def _assign_debug_handle(node: torch.fx.Node) -> None:\n nonlocal unique_id\n if CUSTOM_KEY not in node.meta:\n node.meta[CUSTOM_KEY] = {}\n if NUMERIC_DEBUG_HANDLE_KEY not in node.meta[CUSTOM_KEY]:\n node.meta[CUSTOM_KEY][NUMERIC_DEBUG_HANDLE_KEY] = unique_id\n unique_id += 1\n bfs_trace_with_node_process(ep, _find_max_id)\n unique_id += 1\n bfs_trace_with_node_process(ep, _assign_debug_handle)", + "docstring": "Attach numeric_debug_handle_id for all nodes in the graph module of the given ExportedProgram, like conv2d, squeeze, conv1d, etc, except for placeholder. 
Notice that nodes like getattr are out of scope since they are not in the graph. The graph nodes of input exported program are modified inplace. Here's an example of using debug handle quantize flow:: ep = export_for_training(eager_model, example_inputs) generate_numeric_debug_handle(ep) m = ep.module() quantizer = XNNPACKQuantizer() m = prepare_pt2e(m, quantizer) m = convert_pt2e(m)", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_numeric_debugger.py", + "ast_data": "FunctionDef name:generate_numeric_debug_handle arg:ep arguments arg If Call Raise Call Call Assign FunctionDef name:_find_max_id arg:node arguments arg Assign Call Call Call FunctionDef name:_assign_debug_handle arg:node arguments arg If Compare Assign If Compare Assign Call Call" + }, + { + "library": "pytorch", + "name": "SELU", + "source_code": "class SELU(Module):\n __constants__ = ['inplace']\n inplace: bool\n\n def __init__(self, inplace: bool=False) -> None:\n super().__init__()\n self.inplace = inplace\n\n def forward(self, input: Tensor) -> Tensor:\n return F.selu(input, self.inplace)\n\n def extra_repr(self) -> str:\n inplace_str = 'inplace=True' if self.inplace else ''\n return inplace_str", + "docstring": "Applies the SELU function element-wise. .. math:: \\text{SELU}(x) = \\text{scale} * (\\max(0,x) + \\min(0, \\alpha * (\\exp(x) - 1))) with :math: and :math:. .. warning:: When using `Self-Normalizing Neural Networkstorch.nn.init.calculate_gainSelf-Normalizing Neural Networks(*)*(*)`, same shape as the input. .. image:: ../scripts/activation_images/SELU.png Examples:: >>> m = nn.SELU() >>> input = torch.randn(2) >>> output = m(input) .. _Self-Normalizing Neural Networks:", + "type": "class", + "file_path": "pytorch\\torch\\nn\\modules\\activation.py", + "ast_data": "ClassDef name:SELU Assign FunctionDef name:__init__ arg:self arg:inplace arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "name", + "source_code": "@property\ndef name(self):\n return self.__name", + "docstring": "Returns the file name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", + "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "intersection", + "source_code": "@staticmethod\ndef intersection(bbox1, bbox2):\n x0 = np.maximum(bbox1.xmin, bbox2.xmin)\n x1 = np.minimum(bbox1.xmax, bbox2.xmax)\n y0 = np.maximum(bbox1.ymin, bbox2.ymin)\n y1 = np.minimum(bbox1.ymax, bbox2.ymax)\n return Bbox([[x0, y0], [x1, y1]]) if x0 <= x1 and y0 <= y1 else None", + "docstring": "Return the intersection of *bbox1* and *bbox2* if they intersect, or None if they don't.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:intersection arg:bbox1 arg:bbox2 arguments arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes BoolOp Compare Compare Call" + }, + { + "library": "matplotlib", + "name": "from_levels_and_colors", + "source_code": "def from_levels_and_colors(levels, colors, extend='neither'):\n slice_map = {'both': slice(1, -1), 'min': slice(1, None), 'max': slice(0, -1), 'neither': slice(0, None)}\n _api.check_in_list(slice_map, extend=extend)\n color_slice = slice_map[extend]\n n_data_colors = len(levels) - 1\n n_extend_colors = color_slice.start 
- (color_slice.stop or 0)\n n_expected = n_data_colors + n_extend_colors\n if len(colors) != n_expected:\n raise ValueError(f'Expected {n_expected} colors ({n_data_colors} colors for {len(levels)} levels, and {n_extend_colors} colors for extend == {extend!r}), but got {len(colors)}')\n data_colors = colors[color_slice]\n under_color = colors[0] if extend in ['min', 'both'] else 'none'\n over_color = colors[-1] if extend in ['max', 'both'] else 'none'\n cmap = ListedColormap(data_colors, under=under_color, over=over_color)\n cmap.colorbar_extend = extend\n norm = BoundaryNorm(levels, ncolors=n_data_colors)\n return (cmap, norm)", + "docstring": "A helper routine to generate a cmap and a norm instance which behave similar to contourf's levels and colors arguments. Parameters ---------- levels : sequence of numbers The quantization levels used to construct the . Value `~.Axes.contourf~matplotlib.colors.Colormap~matplotlib.colors.Normalize`", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\colors.py", + "ast_data": "FunctionDef name:from_levels_and_colors arg:levels arg:colors arg:extend arguments arg arg arg Assign Call Call Call Call Call Assign Assign Call Assign BoolOp Assign If Compare Call Raise Call Call Call Assign Assign Compare Assign Compare Assign Call Assign Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "staged_predict", + "source_code": "def staged_predict(self, X):\n for raw_predictions in self._staged_raw_predict(X):\n yield self._loss.link.inverse(raw_predictions.ravel())", + "docstring": "Predict regression target for each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. .. versionadded:: 0.24 Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Yields ------ y : generator of ndarray of shape (n_samples,) The predicted values of the input samples, for each iteration.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", + "ast_data": "FunctionDef name:staged_predict arg:self arg:X arguments arg arg For Call Call Call" + }, + { + "library": "scikit-learn", + "name": "inplace_logistic_derivative", + "source_code": "def inplace_logistic_derivative(Z, delta):\n delta *= Z\n delta *= 1 - Z", + "docstring": "Apply the derivative of the logistic sigmoid function. It exploits the fact that the derivative is a simple function of the output value from logistic function. Parameters ---------- Z : {array-like, sparse matrix}, shape (n_samples, n_features) The data which was output from the logistic activation function during the forward pass. 
delta : {array-like}, shape (n_samples, n_features) The backpropagated error signal to be modified inplace.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py", + "ast_data": "FunctionDef name:inplace_logistic_derivative arg:Z arg:delta arguments arg arg" + }, + { + "library": "tensorflow", + "name": "truncated_normal", + "source_code": "def truncated_normal(self, shape, mean, stddev, dtype):\n if self.seed:\n op = stateless_random_ops.stateless_truncated_normal\n else:\n op = random_ops.truncated_normal\n return op(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)", + "docstring": "A deterministic truncated normal if seed is passed.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py", + "ast_data": "FunctionDef name:truncated_normal arg:self arg:shape arg:mean arg:stddev arg:dtype arguments arg arg arg arg arg If Assign Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "starter_nodes", + "source_code": "def starter_nodes(self) -> tuple[NodeSet, NodeSet]:\n starter_cpu_nodes: NodeSet = set()\n starter_acc_nodes: NodeSet = set()\n for node in self.module.graph.nodes:\n if node.op not in {'placeholder', 'get_attr'}:\n continue\n for user in node.users:\n if user in self.acc_nodes:\n starter_acc_nodes.add(user)\n else:\n starter_cpu_nodes.add(user)\n return (starter_cpu_nodes, starter_acc_nodes)", + "docstring": "Finds nodes that consume module inputs or get_attr nodes.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py", + "ast_data": "FunctionDef name:starter_nodes arg:self arguments arg Call Call For If Compare For If Compare Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "python_properties", + "source_code": "@abc.abstractproperty\ndef python_properties(self):\n raise NotImplementedError", + "docstring": "Returns dictionary of python properties to save in the metadata. This dictionary must be serializable and deserializable to/from JSON. When loading, the items in this dict are used to initialize the object and define attributes in the revived object.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\base_serialization.py", + "ast_data": "FunctionDef name:python_properties arg:self arguments arg Raise" + }, + { + "library": "tensorflow", + "name": "get_user_orgs", + "source_code": "def get_user_orgs(self, username: str) -> requests.Response:\n endpoint = f'users/{username}/orgs'\n return self._make_request('GET', endpoint, username=username)", + "docstring": "Gets all public org memberships for a user. Arguments: username: The user's GitHub username as a string. Returns: a requests.Response object containing the response from the API. 
Raises: requests.exceptions.HTTPError", + "type": "method", + "file_path": "tensorflow\\third_party\\xla\\.github\\workflows\\github_api.py", + "ast_data": "FunctionDef name:get_user_orgs arg:self arg:username arguments arg arg Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "interpgrid", + "source_code": "def interpgrid(a, xi, yi):\n Ny, Nx = np.shape(a)\n if isinstance(xi, np.ndarray):\n x = xi.astype(int)\n y = yi.astype(int)\n xn = np.clip(x + 1, 0, Nx - 1)\n yn = np.clip(y + 1, 0, Ny - 1)\n else:\n x = int(xi)\n y = int(yi)\n if x == Nx - 1:\n xn = x\n else:\n xn = x + 1\n if y == Ny - 1:\n yn = y\n else:\n yn = y + 1\n a00 = a[y, x]\n a01 = a[y, xn]\n a10 = a[yn, x]\n a11 = a[yn, xn]\n xt = xi - x\n yt = yi - y\n a0 = a00 * (1 - xt) + a01 * xt\n a1 = a10 * (1 - xt) + a11 * xt\n ai = a0 * (1 - yt) + a1 * yt\n if not isinstance(xi, np.ndarray):\n if np.ma.is_masked(ai):\n raise TerminateTrajectory\n return ai", + "docstring": "Fast 2D, linear interpolation on an integer grid", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\streamplot.py", + "ast_data": "FunctionDef name:interpgrid arg:a arg:xi arg:yi arguments arg arg arg Assign Call If Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Assign Assign If Compare Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign If Call If Call Raise Return return:yes" + }, + { + "library": "seaborn", + "name": "_fit", + "source_code": "def _fit(self, data: DataFrame, orient: str) -> gaussian_kde:\n fit_kws: dict[str, Any] = {'bw_method': self.bw_method}\n if 'weight' in data:\n fit_kws['weights'] = data['weight']\n kde = gaussian_kde(data[orient], **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n return kde", + "docstring": "Fit and return a KDE object.", + "type": "method", + "file_path": "seaborn\\seaborn\\_stats\\density.py", + "ast_data": "FunctionDef name:_fit arg:self arg:data arg:orient arguments arg arg arg If Compare Assign Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_rotating_buffer_size", + "source_code": "def get_rotating_buffer_size() -> int:\n return torch._C._cuda_tunableop_get_rotating_buffer_size()", + "docstring": "Get the rotating buffer size in kilobytes.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\tunable.py", + "ast_data": "FunctionDef name:get_rotating_buffer_size arguments Return return:yes Call" + }, + { + "library": "pytorch", + "name": "PHWithMeta", + "source_code": "@compatibility(is_backward_compatible=False)\nclass PHWithMeta(PHBase):\n\n def __init__(self, ph_key: Optional[str]=None):\n super().__init__()\n self.ph_key = ph_key", + "docstring": "Object representing an input placeholder to", + "type": "class", + "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py", + "ast_data": "ClassDef name:PHWithMeta FunctionDef name:__init__ arg:self arg:ph_key arguments arg arg Call Call Assign Call" + }, + { + "library": "pytorch", + "name": "constant_", + "source_code": "def constant_(tensor: Tensor, val: float) -> Tensor:\n if torch.overrides.has_torch_function_variadic(tensor):\n return torch.overrides.handle_torch_function(constant_, (tensor,), tensor=tensor, val=val)\n return _no_grad_fill_(tensor, val)", + "docstring": "Fill the input Tensor with the value :math:. 
Args: tensor: an n-dimensional val: the value to fill the tensor with Examples: >>> w = torch.empty(3, 5) >>> nn.init.constant_(w, 0.3)", + "type": "function", + "file_path": "pytorch\\torch\\nn\\init.py", + "ast_data": "FunctionDef name:constant_ arg:tensor arg:val arguments arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_arc", + "source_code": "def _arc(self, quadrant=0, cw=True, radius=1, center=(0, 0)):\n ARC_CODES = [Path.LINETO, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4]\n ARC_VERTICES = np.array([[1.0, 0.0], [1.0, 0.265114773], [0.894571235, 0.519642327], [0.707106781, 0.707106781], [0.519642327, 0.894571235], [0.265114773, 1.0], [0.0, 1.0]])\n if quadrant in (0, 2):\n if cw:\n vertices = ARC_VERTICES\n else:\n vertices = ARC_VERTICES[:, ::-1]\n elif cw:\n vertices = np.column_stack((-ARC_VERTICES[:, 1], ARC_VERTICES[:, 0]))\n else:\n vertices = np.column_stack((-ARC_VERTICES[:, 0], ARC_VERTICES[:, 1]))\n if quadrant > 1:\n radius = -radius\n return list(zip(ARC_CODES, radius * vertices + np.tile(center, (ARC_VERTICES.shape[0], 1))))", + "docstring": "Return the codes and vertices for a rotated, scaled, and translated 90 degree arc. Other Parameters ---------------- quadrant : {0, 1, 2, 3}, default: 0 Uses 0-based indexing (0, 1, 2, or 3). cw : bool, default: True If True, the arc vertices are produced clockwise; counter-clockwise otherwise. radius : float, default: 1 The radius of the arc. center : (float, float), default: (0, 0) (x, y) tuple of the arc's center.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\sankey.py", + "ast_data": "FunctionDef name:_arc arg:self arg:quadrant arg:cw arg:radius arg:center arguments arg arg arg arg arg Assign Assign Call If Compare If Assign Assign If Assign Call Assign Call If Compare Assign Return return:yes Call Call Call" + }, + { + "library": "scipy", + "name": "tukeylambda_variance", + "source_code": "def tukeylambda_variance(lam):\n lam = np.asarray(lam)\n shp = lam.shape\n lam = np.atleast_1d(lam).astype(np.float64)\n threshold = 0.075\n low_mask = lam < -0.5\n neghalf_mask = lam == -0.5\n small_mask = np.abs(lam) < threshold\n reg_mask = ~(low_mask | neghalf_mask | small_mask)\n small = lam[small_mask]\n reg = lam[reg_mask]\n v = np.empty_like(lam)\n v[low_mask] = np.nan\n v[neghalf_mask] = np.inf\n if small.size > 0:\n v[small_mask] = _tukeylambda_var_p(small) / _tukeylambda_var_q(small)\n if reg.size > 0:\n v[reg_mask] = 2.0 / reg ** 2 * (1.0 / (1.0 + 2 * reg) - beta(reg + 1, reg + 1))\n v.shape = shp\n return v", + "docstring": "Variance of the Tukey Lambda distribution. Parameters ---------- lam : array_like The lambda values at which to compute the variance. Returns ------- v : ndarray The variance. For lam < -0.5, the variance is not defined, so np.nan is returned. For lam = 0.5, np.inf is returned. Notes ----- In an interval around lambda=0, this function uses the [4,4] Pade approximation to compute the variance. 
Otherwise it uses the standard formula ( The Pade approximation is used because the standard formula has a removable discontinuity at lambda = 0, and does not produce accurate numerical results near lambda = 0.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_tukeylambda_stats.py", + "ast_data": "FunctionDef name:tukeylambda_variance arg:lam arguments arg Assign Call Assign Assign Call Call Assign Assign Compare Assign Compare Assign Compare Call Assign Assign Assign Assign Call Assign Assign If Compare Assign Call Call If Compare Assign Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "_get_splitter", + "source_code": "@final\ndef _get_splitter(self, data: NDFrame) -> DataSplitter:\n if isinstance(data, Series):\n klass: type[DataSplitter] = SeriesSplitter\n else:\n klass = FrameSplitter\n return klass(data, self.ngroups, sorted_ids=self._sorted_ids, sort_idx=self.result_ilocs)", + "docstring": "Returns ------- Generator yielding subsetted objects", + "type": "method", + "file_path": "pandas\\pandas\\core\\groupby\\ops.py", + "ast_data": "FunctionDef name:_get_splitter arg:self arg:data arguments arg arg If Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "RestoredFunction", + "source_code": "class RestoredFunction(def_function.Function):\n\n def __init__(self, python_function, name, function_spec, concrete_functions):\n super(RestoredFunction, self).__init__(python_function, name, autograph=False, jit_compile=function_spec.jit_compile)\n self.concrete_functions = concrete_functions\n self._function_type = function_spec.function_type\n self._default_values = function_spec.default_values\n self._omit_frequent_tracing_warning = True\n\n @property\n def _run_functions_eagerly(self):\n return False\n\n def _list_all_concrete_functions(self):\n return self.concrete_functions\n\n def _list_all_concrete_functions_for_serialization(self):\n return self.concrete_functions", + "docstring": "Wrapper class for a function that has been restored from saved state. 
See .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py", + "ast_data": "ClassDef name:RestoredFunction FunctionDef name:__init__ arg:self arg:python_function arg:name arg:function_spec arg:concrete_functions arguments arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:_run_functions_eagerly arg:self arguments arg Return return:yes FunctionDef name:_list_all_concrete_functions arg:self arguments arg Return return:yes FunctionDef name:_list_all_concrete_functions_for_serialization arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "non_trainable_variables", + "source_code": "@property\ndef non_trainable_variables(self):\n if not self._variables_created:\n return []\n return self._template_store.non_trainable_variables()", + "docstring": "Returns the list of non-trainable variables created by the Template.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py", + "ast_data": "FunctionDef name:non_trainable_variables arg:self arguments arg If Return return:no Return return:yes Call" + }, + { + "library": "django", + "name": "hex", + "source_code": "@property\ndef hex(self):\n return b2a_hex(self.wkb).upper()", + "docstring": "Return the hexadecimal representation of the WKB (a string).", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:hex arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "from_operator", + "source_code": "@classmethod\ndef from_operator(cls, operator):\n validation_fields = ('is_non_singular', 'is_self_adjoint', 'is_positive_definite', 'is_square')\n kwargs = _extract_attrs(operator, keys=set(operator._composite_tensor_fields + validation_fields))\n non_tensor_params = {}\n param_specs = {}\n for k, v in list(kwargs.items()):\n type_spec_or_v = _extract_type_spec_recursively(v)\n is_tensor = [isinstance(x, type_spec.TypeSpec) for x in nest.flatten(type_spec_or_v)]\n if all(is_tensor):\n param_specs[k] = type_spec_or_v\n elif not any(is_tensor):\n non_tensor_params[k] = v\n else:\n raise NotImplementedError(f'Field {k} contains a mix of `Tensor` and non-`Tensor` values.')\n return cls(param_specs=param_specs, non_tensor_params=non_tensor_params, prefer_static_fields=operator._composite_tensor_prefer_static_fields)", + "docstring": "Builds a from a instance. Args: operator: An instance of . 
Returns: linear_operator_spec: An instance of to be used as the of .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", + "ast_data": "FunctionDef name:from_operator arg:cls arg:operator arguments arg arg Assign Assign Call Call Assign Assign For Call Call Assign Call Assign Call Call If Call Assign If Call Assign Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_drci_classifications", + "source_code": "@retries_decorator()\ndef get_drci_classifications(pr_num: int, project: str='pytorch') -> Any:\n failures = gh_fetch_url(f'https://hud.pytorch.org/api/drci/drci?prNumber={pr_num}', data=f'repo={project}', headers={'Authorization': os.getenv('DRCI_BOT_KEY', ''), 'Accept': 'application/vnd.github.v3+json'}, method='POST', reader=json.load)\n return failures.get(str(pr_num), {}) if failures else {}", + "docstring": "Query HUD API to find similar failures to decide if they are flaky", + "type": "function", + "file_path": "pytorch\\.github\\scripts\\trymerge.py", + "ast_data": "FunctionDef name:get_drci_classifications arg:pr_num arg:project arguments arg arg Assign Call Call Return return:yes Call Call Call" + }, + { + "library": "numpy", + "name": "sections", + "source_code": "def sections(self):\n return list(self._sections.keys())", + "docstring": "Return the section headers of the config file. Parameters ---------- None Returns ------- keys : list of str The list of section headers.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\npy_pkg_config.py", + "ast_data": "FunctionDef name:sections arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_repr_fits_horizontal_", + "source_code": "def _repr_fits_horizontal_(self) -> bool:\n width, height = console.get_console_size()\n max_columns = get_option('display.max_columns')\n nb_columns = len(self.columns)\n if max_columns and nb_columns > max_columns or (width and nb_columns > width // 2):\n return False\n if width is None or not console.in_interactive_session():\n return True\n if get_option('display.width') is not None or console.in_ipython_frontend():\n max_rows = 1\n else:\n max_rows = get_option('display.max_rows')\n buf = StringIO()\n d = self\n if max_rows is not None:\n d = d.iloc[:min(max_rows, len(d))]\n else:\n return True\n d.to_string(buf=buf)\n value = buf.getvalue()\n repr_width = max((len(line) for line in value.split('\\n')))\n return repr_width < width", + "docstring": "Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns.", + "type": "method", + "file_path": "pandas\\pandas\\core\\frame.py", + "ast_data": "FunctionDef name:_repr_fits_horizontal_ arg:self arguments arg Assign Call Assign Call Assign Call If BoolOp BoolOp Compare BoolOp Compare Return return:yes If BoolOp Compare Call Return return:yes If BoolOp Compare Call Call Assign Assign Call Assign Call Assign If Compare Assign Call Call Return return:yes Call Assign Call Assign Call Call Call Return return:yes Compare" + }, + { + "library": "scikit-learn", + "name": "_get_loss", + "source_code": "@abstractmethod\ndef _get_loss(self, sample_weight):\n pass", + "docstring": "Get loss object from sklearn._loss.loss.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", + "ast_data": "FunctionDef name:_get_loss arg:self arg:sample_weight arguments arg arg" + }, + { + "library": "kornia", + "name": "inverse", + "source_code": "@classmethod\ndef inverse(cls, 
input: Boxes, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> Boxes:\n if extra_args is None:\n extra_args = {}\n _input = input.clone()\n if isinstance(module, (K.GeometricAugmentationBase2D,)):\n if module.transform_matrix is None:\n raise ValueError(f'No valid transformation matrix found in {module.__class__}.')\n transform = module.compute_inverse_transformation(module.transform_matrix)\n _input = module.inverse_boxes(_input, param.data, module.flags, transform=transform, **extra_args)\n elif isinstance(module, (K.GeometricAugmentationBase3D,)):\n raise NotImplementedError('The support for 3d box operations are not yet supported. You are welcome to file a PR in our repo.')\n elif isinstance(module, K.ImageSequential) and (not module.is_intensity_only()):\n _input = module.inverse_boxes(_input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n elif isinstance(module, K.container.ImageSequentialBase):\n _input = module.inverse_boxes(_input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n elif isinstance(module, (K.auto.operations.OperationBase,)):\n return BoxSequentialOps.inverse(input, module=module.op, param=param, extra_args=extra_args)\n return _input", + "docstring": "Inverse a transformation with respect to the parameters. Args: input: the input tensor. module: any torch Module but only kornia augmentation modules will count to apply transformations. param: the corresponding parameters to the module. extra_args: Optional dictionary of extra arguments with specific options for different input types.", + "type": "method", + "file_path": "kornia\\kornia\\augmentation\\container\\ops.py", + "ast_data": "FunctionDef name:inverse arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg If Compare Assign Assign Call If Call If Compare Raise Call Assign Call Assign Call If Call Raise Call If BoolOp Call Call Assign Call Call If Call Assign Call Call If Call Return return:yes Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_graph_summary_tag", + "source_code": "def _graph_summary_tag(graph):\n if graph is None:\n raise RuntimeError('graph is None')\n hash_id = hashlib.md5()\n hash_id.update(repr(graph).encode('utf-8'))\n return hash_id.hexdigest()", + "docstring": "Generates and returns a summary tag name for the given graph.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:_graph_summary_tag arg:graph arguments arg If Compare Raise Call Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "args_spec", + "source_code": "@property\ndef args_spec(self) -> tuple[DTensorSpec, ...]:\n args = tree_leaves(self.args_schema) if self.schema_info is not None and self.schema_info.needs_pytree else self.args_schema\n return tuple((item for item in args if isinstance(item, DTensorSpec)))", + "docstring": "args_spec: Tuple[DTensorSpec, ...]: contains a clean list of args spec list with NO non-DTensor positional arguments (i.e. 
int/float/tuple, etc) mainly used by sharding propagation to propagate the output spec", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py", + "ast_data": "FunctionDef name:args_spec arg:self arguments arg Assign BoolOp Compare Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_get_ops_from_nodedefs", + "source_code": "def _get_ops_from_nodedefs(node_defs):\n ops = set()\n for node_def in node_defs:\n op_and_kernel = get_ops_from_nodedef(node_def)\n if op_and_kernel:\n ops.add(op_and_kernel)\n return ops", + "docstring": "Gets the ops and kernels needed from the list of NodeDef. If a NodeDef's op is not in the allowlist of ops without kernel and there is no kernel found for this NodeDef, then skip that NodeDef and proceed to the next one. Args: node_defs: list of NodeDef's to get op/kernel information. Returns: A set of (op_name, kernel_name) tuples.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\selective_registration_header_lib.py", + "ast_data": "FunctionDef name:_get_ops_from_nodedefs arg:node_defs arguments arg Assign Call For Assign Call If Call Return return:yes" + }, + { + "library": "numpy", + "name": "IndexExpression", + "source_code": "class IndexExpression:\n __slots__ = ('maketuple',)\n\n def __init__(self, maketuple):\n self.maketuple = maketuple\n\n def __getitem__(self, item):\n if self.maketuple and (not isinstance(item, tuple)):\n return (item,)\n else:\n return item", + "docstring": "A nicer way to build up index tuples for arrays. .. note:: Use one of the two predefined instances `s_IndexExpressionas_ = IndexExpression(maketuple=False)index_exp = IndexExpression(maketuple=True)slice` plus a few special objects, but there's a lot to remember and this version is simpler because it uses the standard array indexing syntax. 
Examples -------- >>> import numpy as np >>> np.s_[2::2] slice(2, None, 2) >>> np.index_exp[2::2] (slice(2, None, 2),) >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] array([2, 4])", + "type": "class", + "file_path": "numpy\\numpy\\lib\\_index_tricks_impl.py", + "ast_data": "ClassDef name:IndexExpression Assign FunctionDef name:__init__ arg:self arg:maketuple arguments arg arg Assign FunctionDef name:__getitem__ arg:self arg:item arguments arg arg If BoolOp Call Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "temporary_file_path", + "source_code": "def temporary_file_path(self):\n return self.file.name", + "docstring": "Return the full path of this file.", + "type": "method", + "file_path": "django\\django\\core\\files\\uploadedfile.py", + "ast_data": "FunctionDef name:temporary_file_path arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "trace_with_input_signature", + "source_code": "def trace_with_input_signature(self):\n if None not in nest.flatten(self._input_signature) and self._has_kwargs:\n self.add_trace(*self._input_signature)", + "docstring": "Trace with the layer/models inferred input signature if possible.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py", + "ast_data": "FunctionDef name:trace_with_input_signature arg:self arguments arg If BoolOp Compare Call Call" + }, + { + "library": "pytorch", + "name": "get_node_local_rank", + "source_code": "def get_node_local_rank(fallback_rank: Optional[int]=None) -> int:\n if 'LOCAL_RANK' in os.environ:\n return int(os.environ['LOCAL_RANK'])\n elif fallback_rank is not None:\n return int(fallback_rank)\n raise RuntimeError('LOCAL_RANK is not in the environment. Consider passing fallback_rank to allow `get_node_local_rank` to work, assuming you are not running in a multi-device context and want the code to run locally instead.')", + "docstring": "Return the local rank of the current process relative to the node. Semantically, this is a useful concept for mapping processes to devices. For example, on a node with 8 accelerator you could use the node local rank to decide which accelerator device to bind the process to. In practice, the actual assignment of node local ranks is handled by the process launcher outside of pytorch, and communicated via the environment variable. Torchrun will automatically populate , but other launchers may not. If is unspecified, this API will fall back to the provided kwarg 'fallback_rank' if specified, otherwise it will raise an error. 
The intent is to allow writing an application that runs either in single or multi device contexts without error.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", + "ast_data": "FunctionDef name:get_node_local_rank arg:fallback_rank arguments arg If Compare Return return:yes Call If Compare Return return:yes Call Raise Call" + }, + { + "library": "pytorch", + "name": "_register_pre_forward_hook", + "source_code": "@no_type_check\ndef _register_pre_forward_hook(state: _FSDPState, module: nn.Module) -> None:\n for forward_handle in state._pre_forward_handles:\n forward_handle.remove()\n state._pre_forward_handles.clear()\n module_param_handle = state._fully_sharded_module_to_handle.get(module, None)\n hook = functools.partial(_pre_forward, state, module_param_handle, _pre_forward_unshard)\n state._pre_forward_handles.append(module.register_forward_pre_hook(hook, prepend=True, with_kwargs=True))", + "docstring": "Registers a pre-forward hook on ``.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py", + "ast_data": "FunctionDef name:_register_pre_forward_hook arg:state arg:module arguments arg arg For Call Call Assign Call Assign Call Call Call" + }, + { + "library": "pytorch", + "name": "set_wrap_triton_enabled", + "source_code": "@contextlib.contextmanager\ndef set_wrap_triton_enabled(enabled: bool) -> Generator[None, None, None]:\n try:\n prev = is_wrap_triton_enabled()\n wrap_triton_enabled.value = enabled\n yield\n finally:\n wrap_triton_enabled.value = prev", + "docstring": "If triton kernels annotated with @wrap_triton should dispatch via HOP or go straight to the triton kernel execution. We have this switch because eager-mode performance of HOP dispatch is slow enough to matter (~1ms) and we know that wrap_triton isn't necessary in some situations (eager-mode with regular Tensors)", + "type": "function", + "file_path": "pytorch\\torch\\_library\\triton.py", + "ast_data": "FunctionDef name:set_wrap_triton_enabled arg:enabled arguments arg Try Assign Call Assign Assign" + }, + { + "library": "tensorflow", + "name": "_DenseToSparseBatchDataset", + "source_code": "class _DenseToSparseBatchDataset(dataset_ops.UnaryDataset):\n\n def __init__(self, input_dataset, batch_size, row_shape, name=None):\n if not isinstance(dataset_ops.get_legacy_output_types(input_dataset), dtypes.DType):\n raise TypeError(f'`dense_to_sparse_batch` requires an input dataset whose elements have a single component, but the given dataset has the following component types: {dataset_ops.get_legacy_output_types(input_dataset)}.')\n self._input_dataset = input_dataset\n self._batch_size = batch_size\n self._row_shape = row_shape\n self._element_spec = sparse_tensor.SparseTensorSpec(tensor_shape.TensorShape([None]).concatenate(self._row_shape), dataset_ops.get_legacy_output_types(input_dataset))\n self._name = name\n variant_tensor = ged_ops.dense_to_sparse_batch_dataset(self._input_dataset._variant_tensor, self._batch_size, row_shape=convert.partial_shape_to_tensor(self._row_shape), **self._flat_structure)\n super(_DenseToSparseBatchDataset, self).__init__(input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._element_spec", + "docstring": "A that batches ragged dense elements into s.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\sparse_batch_op.py", + "ast_data": "ClassDef name:_DenseToSparseBatchDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:batch_size 
arg:row_shape arg:name arguments arg arg arg arg arg If Call Call Raise Call Call Assign Assign Assign Assign Call Call Call Call Assign Assign Call Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes" + }, + { + "library": "scipy", + "name": "mean", + "source_code": "def mean(self, df, scale):\n dim, df, scale = self._process_parameters(df, scale)\n out = self._mean(dim, df, scale)\n return _squeeze_output(out) if out is not None else out", + "docstring": "Mean of the inverse Wishart distribution. Only valid if the degrees of freedom are greater than the dimension of the scale matrix plus one. Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : float or None The mean of the distribution", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:mean arg:self arg:df arg:scale arguments arg arg arg Assign Call Assign Call Return return:yes Compare Call" + }, + { + "library": "scikit-learn", + "name": "partial_fit", + "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y=None):\n xp, _ = get_namespace(X)\n first_pass = not hasattr(self, 'n_samples_seen_')\n X = validate_data(self, X, reset=first_pass, accept_sparse=('csr', 'csc'), dtype=_array_api.supported_float_dtypes(xp), ensure_all_finite='allow-nan')\n if sparse.issparse(X):\n mins, maxs = min_max_axis(X, axis=0, ignore_nan=True)\n max_abs = np.maximum(np.abs(mins), np.abs(maxs))\n else:\n max_abs = _array_api._nanmax(xp.abs(X), axis=0, xp=xp)\n if first_pass:\n self.n_samples_seen_ = X.shape[0]\n else:\n max_abs = xp.maximum(self.max_abs_, max_abs)\n self.n_samples_seen_ += X.shape[0]\n self.max_abs_ = max_abs\n self.scale_ = _handle_zeros_in_scale(max_abs, copy=True)\n return self", + "docstring": "Online computation of max absolute value of X for later scaling. All of X is processed as a single batch. This is intended for cases when :meth: is not feasible due to very large number of or because X is read from a continuous stream. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to compute the mean and standard deviation used for later scaling along the features axis. y : None Ignored. 
Returns ------- self : object Fitted scaler.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", + "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Call Assign Call Call If Call Assign Call Assign Call Call Call Assign Call Call If Assign Assign Call Assign Assign Call Return return:yes Call" + }, + { + "library": "pygame", + "name": "metrics", + "source_code": "def metrics(self, text):\n return self.get_metrics(text)", + "docstring": "metrics(text) -> list Gets the metrics for each character in the passed string.", + "type": "method", + "file_path": "pygame\\src_py\\ftfont.py", + "ast_data": "FunctionDef name:metrics arg:self arg:text arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "trima", + "source_code": "def trima(a, limits=None, inclusive=(True, True)):\n a = ma.asarray(a)\n a.unshare_mask()\n if limits is None or limits == (None, None):\n return a\n lower_lim, upper_lim = limits\n lower_in, upper_in = inclusive\n condition = False\n if lower_lim is not None:\n if lower_in:\n condition |= a < lower_lim\n else:\n condition |= a <= lower_lim\n if upper_lim is not None:\n if upper_in:\n condition |= a > upper_lim\n else:\n condition |= a >= upper_lim\n a[condition.filled(True)] = masked\n return a", + "docstring": "Trims an array by masking the data outside some given limits. Returns a masked version of the input array. Parameters ---------- a : array_like Input array. limits : {None, tuple}, optional Tuple of (lower limit, upper limit) in absolute values. Values of the input array lower (greater) than the lower (upper) limit will be masked. A limit is None indicates an open interval. inclusive : (bool, bool) tuple, optional Tuple of (lower flag, upper flag), indicating whether values exactly equal to the lower (upper) limit are allowed. Examples -------- >>> from scipy.stats.mstats import trima >>> import numpy as np >>> a = np.arange(10) The interval is left-closed and right-open, i.e., . Trim the array by keeping only values in the interval. 
>>> trima(a, limits=(2, 8), inclusive=(True, False)) masked_array(data=[--, --, 2, 3, 4, 5, 6, 7, --, --], mask=[ True, True, False, False, False, False, False, False, True, True], fill_value=999999)", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", + "ast_data": "FunctionDef name:trima arg:a arg:limits arg:inclusive arguments arg arg arg Assign Call Call If BoolOp Compare Compare Return return:yes Assign Assign Assign If Compare If Compare Compare If Compare If Compare Compare Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "Samples", + "source_code": "class Samples(object):\n\n def __init__(self, string_table):\n self._string_table = string_table\n self._node_name_to_sample = {}\n\n def add(self, datum, location_ids):\n node_name = datum.node_exec_stats.node_name\n if node_name in self._node_name_to_sample:\n sample = self._node_name_to_sample[node_name]\n sample.location_id.extend(location_ids)\n else:\n sample = profile_pb2.Sample()\n sample.value.extend([0, 0, 0])\n label = sample.label.add()\n label.key = self._string_table.index_of('node_name')\n label.str = self._string_table.index_of(node_name)\n label = sample.label.add()\n label.key = self._string_table.index_of('op_type')\n label.str = self._string_table.index_of(datum.op_type)\n self._node_name_to_sample[node_name] = sample\n sample.value[0] += 1\n sample.value[1] += datum.node_exec_stats.all_end_rel_micros\n sample.value[2] += datum.node_exec_stats.op_end_rel_micros - datum.node_exec_stats.op_start_rel_micros\n\n def get_sample_protos(self):\n return self._node_name_to_sample.values()", + "docstring": "Keeps track of protos for pprof profile. Samples store the following statistics in order: count, all_time, op_time", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py", + "ast_data": "ClassDef name:Samples FunctionDef name:__init__ arg:self arg:string_table arguments arg arg Assign Assign FunctionDef name:add arg:self arg:datum arg:location_ids arguments arg arg arg Assign If Compare Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign FunctionDef name:get_sample_protos arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_binary_search_insert_arg", + "source_code": "def _binary_search_insert_arg(ordered_args, new_arg):\n if len(ordered_args) == 0:\n return [new_arg]\n from sympy.core.basic import _args_sortkey as sort_key, Basic\n if sort_key(ordered_args[-1]) < sort_key(new_arg):\n return ordered_args + [new_arg]\n if sort_key(ordered_args[0]) > sort_key(new_arg):\n return [new_arg] + ordered_args\n low, high = (0, len(ordered_args) - 1)\n while low <= high:\n mid = (low + high) // 2\n compare_result = Basic.compare(ordered_args[mid], new_arg)\n if compare_result == 0:\n return None\n elif compare_result < 0:\n low = mid + 1\n else:\n high = mid - 1\n ordered_args.insert(low, new_arg)\n return ordered_args", + "docstring": "If new_arg is found in ordered_args None is returned, else the new ordered_args with new_arg inserted", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\sym_node.py", + "ast_data": "FunctionDef name:_binary_search_insert_arg arg:ordered_args arg:new_arg arguments arg arg If Compare Call Return return:yes If Compare Call Call Return return:yes If Compare Call Call Return return:yes Assign Call While Compare Assign Assign Call If Compare Return return:no If Compare Assign Assign Call Return 
return:yes" + }, + { + "library": "sphinx", + "name": "_load_builtin_themes", + "source_code": "def _load_builtin_themes(self) -> None:\n themes = self._find_themes(package_dir / 'themes')\n for name, theme in themes.items():\n self._themes[name] = _StrPath(theme)", + "docstring": "Load built-in themes.", + "type": "method", + "file_path": "sphinx\\sphinx\\theming.py", + "ast_data": "FunctionDef name:_load_builtin_themes arg:self arguments arg Assign Call For Call Assign Call" + }, + { + "library": "pytorch", + "name": "validate_idx", + "source_code": "def validate_idx(rank: int, idx: int):\n assert isinstance(idx, Dim)\n assert isinstance(rank, Dim)\n assert idx >= 0 and idx < rank or idx == 0", + "docstring": "Validates that idx is a valid index for the given shape. Assumes the index is already canonicalized.", + "type": "function", + "file_path": "pytorch\\torch\\_prims_common\\__init__.py", + "ast_data": "FunctionDef name:validate_idx arg:rank arg:idx arguments arg arg Call Call BoolOp BoolOp Compare Compare Compare" + }, + { + "library": "django", + "name": "add", + "source_code": "def add(self, level, message, extra_tags=''):\n if not message:\n return\n level = int(level)\n if level < self.level:\n return\n self.added_new = True\n message = Message(level, message, extra_tags=extra_tags)\n self._queued_messages.append(message)", + "docstring": "Queue a message to be stored. The message is only queued if it contained something and its level is not less than the recording level (``).", + "type": "method", + "file_path": "django\\django\\contrib\\messages\\storage\\base.py", + "ast_data": "FunctionDef name:add arg:self arg:level arg:message arg:extra_tags arguments arg arg arg arg If Return return:no Assign Call If Compare Return return:no Assign Assign Call Call" + }, + { + "library": "pytorch", + "name": "glu", + "source_code": "def glu(input: Tensor, dim: int=-1) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(glu, (input,), input, dim=dim)\n if input.dim() == 0:\n raise RuntimeError('glu does not support scalars because halving size must be even')\n return torch._C._nn.glu(input, dim)", + "docstring": "glu(input, dim=-1) -> Tensor The gated linear unit. Computes: .. math :: \\text{GLU}(a, b) = a \\otimes \\sigma(b) where is split in half along to form and , :math: is the sigmoid function and :math: is the element-wise product between matrices. See _. Args: input (Tensor): input tensor dim (int): dimension on which to split the input. 
Default: -1", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:glu arg:input arg:dim arguments arg arg If Call Return return:yes Call If Compare Call Raise Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "_threshold_scores_to_class_labels", + "source_code": "def _threshold_scores_to_class_labels(y_score, threshold, classes, pos_label):\n if pos_label is None:\n map_thresholded_score_to_label = np.array([0, 1])\n else:\n pos_label_idx = np.flatnonzero(classes == pos_label)[0]\n neg_label_idx = np.flatnonzero(classes != pos_label)[0]\n map_thresholded_score_to_label = np.array([neg_label_idx, pos_label_idx])\n return classes[map_thresholded_score_to_label[(y_score >= threshold).astype(int)]]", + "docstring": "Threshold and return the associated class labels.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py", + "ast_data": "FunctionDef name:_threshold_scores_to_class_labels arg:y_score arg:threshold arg:classes arg:pos_label arguments arg arg arg arg If Compare Assign Call Assign Call Compare Assign Call Compare Assign Call Return return:yes Call Compare" + }, + { + "library": "sphinx", + "name": "post_process_images", + "source_code": "def post_process_images(self, doctree: Node) -> None:\n images = ImageAdapter(self.env)\n for node in doctree.findall(nodes.image):\n if '?' in node['candidates']:\n continue\n if '*' not in node['candidates']:\n for imgtype in self.supported_image_types:\n candidate = node['candidates'].get(imgtype, None)\n if candidate:\n break\n else:\n mimetypes = sorted(node['candidates'])\n image_uri = images.get_original_image_uri(node['uri'])\n if mimetypes:\n logger.warning(__('a suitable image for %s builder not found: %s (%s)'), self.name, mimetypes, image_uri, location=node)\n else:\n logger.warning(__('a suitable image for %s builder not found: %s'), self.name, image_uri, location=node)\n continue\n node['uri'] = candidate\n else:\n candidate = node['uri']\n if candidate not in self.env.images:\n continue\n self.images[candidate] = self.env.images[candidate][1]", + "docstring": "Pick the best candidate for all image URIs.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\__init__.py", + "ast_data": "FunctionDef name:post_process_images arg:self arg:doctree arguments arg arg Assign Call For Call If Compare If Compare For Assign Call If Assign Call Assign Call If Call Call Call Call Assign Assign If Compare Assign" + }, + { + "library": "tensorflow", + "name": "get_global_generator", + "source_code": "@tf_export('random.get_global_generator', 'random.experimental.get_global_generator')\ndef get_global_generator():\n global global_generator\n if global_generator is None:\n if config.is_op_determinism_enabled():\n raise RuntimeError('\"get_global_generator\" cannot be called if determinism is enabled, unless \"set_global_generator\" has already been called. Please call \"set_global_generator\" first.')\n with ops.init_scope():\n global_generator = Generator.from_non_deterministic_state()\n return global_generator", + "docstring": "Retrieves the global generator. This function will create the global generator the first time it is called, and the generator will be placed at the default device at that time, so one needs to be careful when this function is first called. Using a generator placed on a less-ideal device will incur performance regression. 
Returns: The global object.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", + "ast_data": "FunctionDef name:get_global_generator arguments If Compare If Call Raise Call With Call Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "init_gradient_and_hessian", + "source_code": "def init_gradient_and_hessian(self, n_samples, dtype=np.float64, order='F'):\n if dtype not in (np.float32, np.float64):\n raise ValueError(f\"Valid options for 'dtype' are np.float32 and np.float64. Got dtype={dtype} instead.\")\n if self.is_multiclass:\n shape = (n_samples, self.n_classes)\n else:\n shape = (n_samples,)\n gradient = np.empty(shape=shape, dtype=dtype, order=order)\n if self.constant_hessian:\n hessian = np.ones(shape=(1,), dtype=dtype)\n else:\n hessian = np.empty(shape=shape, dtype=dtype, order=order)\n return (gradient, hessian)", + "docstring": "Initialize arrays for gradients and hessians. Unless hessians are constant, arrays are initialized with undefined values. Parameters ---------- n_samples : int The number of samples, usually passed to . dtype : {np.float64, np.float32}, default=np.float64 The dtype of the arrays gradient and hessian. order : {'C', 'F'}, default='F' Order of the arrays gradient and hessian. The default 'F' makes the arrays contiguous along samples. Returns ------- gradient : C-contiguous array of shape (n_samples,) or array of shape (n_samples, n_classes) Empty array (allocated but not initialized) to be used as argument gradient_out. hessian : C-contiguous array of shape (n_samples,), array of shape (n_samples, n_classes) or shape (1,) Empty (allocated but not initialized) array to be used as argument hessian_out. If constant_hessian is True (e.g. ), the array is initialized to ``.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", + "ast_data": "FunctionDef name:init_gradient_and_hessian arg:self arg:n_samples arg:dtype arg:order arguments arg arg arg arg If Compare Raise Call If Assign Assign Assign Call If Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_MaybeColocateWith", + "source_code": "@tf_contextlib.contextmanager\ndef _MaybeColocateWith(inputs):\n if not inputs:\n yield\n else:\n with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):\n yield", + "docstring": "A context manager for (maybe) colocating with a list of input tensors. Args: inputs: A list of or objects. Returns: A context manager.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_library.py", + "ast_data": "FunctionDef name:_MaybeColocateWith arg:inputs arguments arg If With Call Call" + }, + { + "library": "numpy", + "name": "mod", + "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_mod_dispatcher)\ndef mod(a, values):\n return _to_bytes_or_str_array(_vec_string(a, np.object_, '__mod__', (values,)), a)", + "docstring": "Return (a % i), that is pre-Python 2.6 string formatting (interpolation), element-wise for a pair of array_likes of str or unicode. Parameters ---------- a : array_like, with or dtype values : array_like of values These values will be element-wise interpolated into the string. 
Returns ------- out : ndarray Output array of `` dtype, depending on input types Examples -------- >>> import numpy as np >>> a = np.array([\"NumPy is a %s library\"]) >>> np.strings.mod(a, values=[\"Python\"]) array(['NumPy is a Python library'], dtype='>> a = np.array([b'%d bytes', b'%d bits']) >>> values = np.array([8, 64]) >>> np.strings.mod(a, values) array([b'8 bytes', b'64 bits'], dtype='|S7')", + "type": "function", + "file_path": "numpy\\numpy\\_core\\strings.py", + "ast_data": "FunctionDef name:mod arg:a arg:values arguments arg arg Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_LocalCloudTpuClient", + "source_code": "class _LocalCloudTpuClient(object):\n\n def api_available(self):\n return False", + "docstring": "Dummy local Cloud TPU client.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py", + "ast_data": "ClassDef name:_LocalCloudTpuClient FunctionDef name:api_available arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "Identity", + "source_code": "class Identity(sympy.Function):\n precedence = 10\n\n def __repr__(self):\n return f'Identity({self.args[0]})'\n\n def _eval_is_real(self):\n return self.args[0].is_real\n\n def _eval_is_integer(self):\n return self.args[0].is_integer\n\n def _eval_expand_identity(self, **hints):\n return self.args[0]", + "docstring": "Prevents expansion and other optimizations", + "type": "class", + "file_path": "pytorch\\torch\\utils\\_sympy\\functions.py", + "ast_data": "ClassDef name:Identity Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:_eval_is_real arg:self arguments arg Return return:yes FunctionDef name:_eval_is_integer arg:self arguments arg Return return:yes FunctionDef name:_eval_expand_identity arg:self arguments arg arg Return return:yes" + }, + { + "library": "django", + "name": "tuple", + "source_code": "@property\ndef tuple(self):\n return tuple((self[i] for i in range(len(self))))", + "docstring": "Return the tuple representation of this LineString.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", + "ast_data": "FunctionDef name:tuple arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "django", + "name": "get_changelist_instance", + "source_code": "def get_changelist_instance(self, request):\n list_display = self.get_list_display(request)\n list_display_links = self.get_list_display_links(request, list_display)\n if self.get_actions(request):\n list_display = ['action_checkbox', *list_display]\n sortable_by = self.get_sortable_by(request)\n ChangeList = self.get_changelist(request)\n return ChangeList(request, self.model, list_display, list_display_links, self.get_list_filter(request), self.date_hierarchy, self.get_search_fields(request), self.get_list_select_related(request), self.list_per_page, self.list_max_show_all, self.list_editable, self, sortable_by, self.search_help_text)", + "docstring": "Return a instance based on . 
May raise .", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:get_changelist_instance arg:self arg:request arguments arg arg Assign Call Assign Call If Call Assign Assign Call Assign Call Return return:yes Call Call Call Call" + }, + { + "library": "authlib", + "name": "validate_amr", + "source_code": "def validate_amr(self):\n amr = self.get('amr')\n if amr and (not isinstance(self['amr'], list)):\n raise InvalidClaimError('amr')", + "docstring": "OPTIONAL. Authentication Methods References. JSON array of strings that are identifiers for authentication methods used in the authentication. For instance, values might indicate that both password and OTP authentication methods were used. The definition of particular values to be used in the amr Claim is beyond the scope of this specification. Parties using this claim will need to agree upon the meanings of the values used, which may be context-specific. The amr value is an array of case sensitive strings.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\core\\claims.py", + "ast_data": "FunctionDef name:validate_amr arg:self arguments arg Assign Call If BoolOp Call Raise Call" + }, + { + "library": "pytorch", + "name": "impl", + "source_code": "def impl(self, device_types: typing.Union[str, typing.Iterable[str]], _stacklevel=2) -> typing.Callable:\n if isinstance(device_types, str):\n device_types = [device_types]\n for device_type in device_types:\n validate_device_type(device_type)\n\n def inner(f):\n for device_type in set(device_types):\n self._check_doesnt_have_library_impl(device_type)\n self._register_impl(device_type, f, stacklevel=_stacklevel)\n dispatch_key = SUPPORTED_DEVICE_TYPE_TO_KEY[device_type]\n library.impl(self._lib, self._opname, dispatch_key)(f)\n return f\n return inner", + "docstring": "This API is deprecated, please use torch.library.custom_op instead", + "type": "method", + "file_path": "pytorch\\torch\\_custom_op\\impl.py", + "ast_data": "FunctionDef name:impl arg:self arg:device_types arg:_stacklevel arguments arg arg arg If Call Assign For Call FunctionDef name:inner arg:f arguments arg For Call Call Call Assign Call Call Return return:yes Return return:yes" + }, + { + "library": "kornia", + "name": "rgb_to_y", + "source_code": "def rgb_to_y(image: Tensor) -> Tensor:\n if not isinstance(image, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n r: Tensor = image[..., 0:1, :, :]\n g: Tensor = image[..., 1:2, :, :]\n b: Tensor = image[..., 2:3, :, :]\n y: Tensor = _rgb_to_y(r, g, b)\n return y", + "docstring": "Convert an RGB image to Y. Args: image: RGB Image to be converted to Y with shape :math:. Returns: Y version of the image with shape :math:. 
Examples: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_y(input) # 2x1x4x5", + "type": "function", + "file_path": "kornia\\kornia\\color\\ycbcr.py", + "ast_data": "FunctionDef name:rgb_to_y arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_unpack_nested_dtype", + "source_code": "def _unpack_nested_dtype(other: Index) -> DtypeObj:\n dtype = other.dtype\n if isinstance(dtype, CategoricalDtype):\n return dtype.categories.dtype\n elif isinstance(dtype, ArrowDtype):\n import pyarrow as pa\n if pa.types.is_dictionary(dtype.pyarrow_dtype):\n other = other[:0].astype(ArrowDtype(dtype.pyarrow_dtype.value_type))\n return other.dtype", + "docstring": "When checking if our dtype is comparable with another, we need to unpack CategoricalDtype to look at its categories.dtype. Parameters ---------- other : Index Returns ------- np.dtype or ExtensionDtype", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_unpack_nested_dtype arg:other arguments arg Assign If Call Return return:yes If Call If Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "broadcast_dynamic_shape", + "source_code": "@dispatch.dispatch_for_api(array_ops.broadcast_dynamic_shape)\ndef broadcast_dynamic_shape(shape_x: dynamic_ragged_shape.DenseOrRaggedShape, shape_y: dynamic_ragged_shape.DenseOrRaggedShape) -> dynamic_ragged_shape.DynamicRaggedShape:\n if not isinstance(shape_x, dynamic_ragged_shape.DynamicRaggedShape):\n shape_x = dynamic_ragged_shape.DynamicRaggedShape([], shape_x)\n if not isinstance(shape_y, dynamic_ragged_shape.DynamicRaggedShape):\n shape_y = dynamic_ragged_shape.DynamicRaggedShape([], shape_y)\n return dynamic_ragged_shape.broadcast_dynamic_shape(shape_x, shape_y)", + "docstring": "Returns the shape formed by broadcasting two shapes to be compatible. 1. If shape_x and shape_y both have row_partitions, then fail if their dtypes don't match. 2. If neither has row_partitions and they have different dtypes, go with int64. 3. If one has row_partitions, go with that dtype. Args: shape_x: A shape_y: A Returns: A . Raises: ValueError: If and are not broadcast-compatible.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py", + "ast_data": "FunctionDef name:broadcast_dynamic_shape arg:shape_x arg:shape_y arguments arg arg If Call Assign Call If Call Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "check_random_state", + "source_code": "def check_random_state(seed=None):\n if seed is None or isinstance(seed, numbers.Integral | np.integer):\n return np.random.default_rng(seed)\n elif isinstance(seed, np.random.RandomState | np.random.Generator):\n return seed\n else:\n raise ValueError(f'{seed!r} cannot be used to seed a numpy.random.Generator instance')", + "docstring": "Turn into a instance. 
Parameters ---------- seed : {None, int, , }, optional If is an int or None, a new is created using `seednumpy.random.Generatornumpy.random.RandomState`} Random number generator.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_qmc.py", + "ast_data": "FunctionDef name:check_random_state arg:seed arguments arg If BoolOp Compare Call Return return:yes Call If Call Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "list_pop", + "source_code": "def list_pop(list_, i, opts):\n assert isinstance(opts, ListPopOpts)\n if isinstance(list_, tensor_array_ops.TensorArray):\n raise ValueError('TensorArray does not support item removal')\n elif tensor_util.is_tf_type(list_):\n if list_.dtype == dtypes.variant:\n return _tf_tensor_list_pop(list_, i, opts)\n else:\n raise ValueError('tensor lists are expected to be Tensors with dtype=tf.variant, instead found %s' % list_)\n else:\n return _py_list_pop(list_, i)", + "docstring": "The list pop function. Note: it is unspecified where list_ will be mutated or not. If list_ is a TensorFlow entity, it will not be typically mutated. If list_ is a plain list, it will be. In general, if the list is mutated then the return value should point to the original entity. Args: list_: An entity that supports pop semantics. i: Optional index to pop from. May be None. opts: A ListPopOpts. Returns: Tuple (x, out_list_): out_list_: same as list_, after the removal was performed. x: the removed element value. Raises: ValueError: if list_ is not of a known list-like type or the operation is not supported for that type.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py", + "ast_data": "FunctionDef name:list_pop arg:list_ arg:i arg:opts arguments arg arg arg Call If Call Raise Call If Call If Compare Return return:yes Call Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "round_to_int", + "source_code": "def round_to_int(self, x: T, dtype: torch.dtype) -> T:\n raise NotImplementedError", + "docstring": "Convert x to dtype with round-to-even semantics. See also trunc_to_int.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", + "ast_data": "FunctionDef name:round_to_int arg:self arg:x arg:dtype arguments arg arg arg Raise" + }, + { + "library": "scipy", + "name": "from_eigendecomposition", + "source_code": "@staticmethod\ndef from_eigendecomposition(eigendecomposition):\n return CovViaEigendecomposition(eigendecomposition)", + "docstring": "Representation of a covariance provided via eigendecomposition Parameters ---------- eigendecomposition : sequence A sequence (nominally a tuple) containing the eigenvalue and eigenvector arrays as computed by or . Notes ----- Let the covariance matrix be :math:, let :math: be matrix of eigenvectors, and let :math: be the diagonal matrix of eigenvalues such that . When all of the eigenvalues are strictly positive, whitening of a data point :math: is performed by computing :math:, where the inverse square root can be taken element-wise. :math: is calculated as :math:, where the :math: operation is performed element-wise. This class supports singular covariance matrices. When computing `CovarianceCovariance` object against reference implementations. 
>>> res = cov.whiten(x) >>> ref = x @ (v @ np.diag(w**-0.5)) >>> np.allclose(res, ref) True >>> res = cov.log_pdet >>> ref = np.linalg.slogdet(A)[-1] >>> np.allclose(res, ref) True", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_covariance.py", + "ast_data": "FunctionDef name:from_eigendecomposition arg:eigendecomposition arguments arg Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "autolabel_auc", + "source_code": "def autolabel_auc(rects, ax):\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width() / 2.0, 1.05 * height, '%.3f' % height, ha='center', va='bottom')", + "docstring": "Attach a text label above each bar displaying its height.", + "type": "function", + "file_path": "scikit-learn\\benchmarks\\bench_online_ocsvm.py", + "ast_data": "FunctionDef name:autolabel_auc arg:rects arg:ax arguments arg arg For Assign Call Call Call Call" + }, + { + "library": "authlib", + "name": "as_json", + "source_code": "def as_json(self, is_private=False, **params):\n obj = self.as_dict(is_private, **params)\n return json_dumps(obj)", + "docstring": "Represent this key as a JSON string.", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7517\\base_key.py", + "ast_data": "FunctionDef name:as_json arg:self arg:is_private arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "update", + "source_code": "def update(self, **kwargs):\n self._not_support_combined_queries('update')\n if self.query.is_sliced:\n raise TypeError('Cannot update a query once a slice has been taken.')\n self._for_write = True\n query = self.query.chain(sql.UpdateQuery)\n query.add_update_values(kwargs)\n new_order_by = []\n for col in query.order_by:\n alias = col\n descending = False\n if isinstance(alias, str) and alias.startswith('-'):\n alias = alias.removeprefix('-')\n descending = True\n if (annotation := query.annotations.get(alias)):\n if getattr(annotation, 'contains_aggregate', False):\n raise exceptions.FieldError(f'Cannot update when ordering by an aggregate: {annotation}')\n if descending:\n annotation = annotation.desc()\n new_order_by.append(annotation)\n else:\n new_order_by.append(col)\n query.order_by = tuple(new_order_by)\n query.clear_select_clause()\n with transaction.mark_for_rollback_on_error(using=self.db):\n rows = query.get_compiler(self.db).execute_sql(ROW_COUNT)\n self._result_cache = None\n return rows", + "docstring": "Update all elements in the current QuerySet, setting all the given fields to the appropriate values.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:update arg:self arguments arg arg Call If Raise Call Assign Assign Call Call Assign For Assign Assign If BoolOp Call Call Assign Call Assign If Call If Call Raise Call If Assign Call Call Call Assign Call Call With Call Assign Call Call Assign Return return:yes" + }, + { + "library": "pandas", + "name": "add_categories", + "source_code": "def add_categories(self, new_categories) -> Self:\n if not is_list_like(new_categories):\n new_categories = [new_categories]\n already_included = set(new_categories) & set(self.dtype.categories)\n if len(already_included) != 0:\n raise ValueError(f'new categories must not include old categories: {already_included}')\n if hasattr(new_categories, 'dtype'):\n from pandas import Series\n dtype = find_common_type([self.dtype.categories.dtype, new_categories.dtype])\n new_categories = Series(list(self.dtype.categories) + 
list(new_categories), dtype=dtype)\n else:\n new_categories = list(self.dtype.categories) + list(new_categories)\n new_dtype = CategoricalDtype(new_categories, self.ordered)\n cat = self.copy()\n codes = coerce_indexer_dtype(cat._ndarray, new_dtype.categories)\n NDArrayBacked.__init__(cat, codes, new_dtype)\n return cat", + "docstring": "Add new categories. will be included at the last/highest place in the categories and will be unused directly after this call. Parameters ---------- new_categories : category or list-like of category The new categories to be included. Returns ------- Categorical Categorical with new categories added. Raises ------ ValueError If the new categories include old categories or do not validate as categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. Examples -------- >>> c = pd.Categorical([\"c\", \"b\", \"c\"]) >>> c ['c', 'b', 'c'] Categories (2, object): ['b', 'c'] >>> c.add_categories([\"d\", \"a\"]) ['c', 'b', 'c'] Categories (4, object): ['b', 'c', 'd', 'a']", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:add_categories arg:self arg:new_categories arguments arg arg If Call Assign Assign Call Call If Compare Call Raise Call If Call Assign Call Assign Call Call Call Assign Call Call Assign Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "add", + "source_code": "def add(self, expr: SympyBoolean) -> bool:\n if expr == sympy.true:\n return True\n orig_expr = expr\n orig_reduced = orig_expr.xreplace(self._var_to_val)\n if orig_reduced == sympy.false:\n self._inconsistencies.append(f'{orig_expr} is inconsistent!')\n if isinstance(expr, (sympy.Ne, sympy.Or, sympy.And)) or self._has_unsupported_sympy_function(expr):\n return False\n free_symbols = expr.free_symbols\n assert free_symbols, f'Did not expect constraint with no free variables: {expr}'\n if len(free_symbols) > 1:\n self._multivariate_inequalities.add(expr)\n else:\n s = next(iter(free_symbols))\n old_n_congruences = len(self._congruences[s])\n expr = self.rewrite_with_congruences(s, expr)\n new_n_congruences = len(self._congruences[s])\n if expr == sympy.true:\n return old_n_congruences == new_n_congruences\n reduced = expr.xreplace(self._var_to_val)\n if reduced == sympy.false:\n self._inconsistencies.append(f'{expr}, obtained by rewriting {orig_expr} with congruences, is inconsistent!')\n if isinstance(expr, sympy.Eq):\n self._symbols_with_equalities.add(s)\n self._univariate_inequalities[s].add(expr)\n return False", + "docstring": "Add an expression to the set of constraints. 
Return whether the expression is a trivial constraint (i.e., an obvious tautology).", + "type": "method", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:add arg:self arg:expr arguments arg arg If Compare Return return:yes Assign Assign Call If Compare Call If BoolOp Call Call Return return:yes Assign If Compare Call Call Assign Call Call Assign Call Assign Call Assign Call If Compare Return return:yes Compare Assign Call If Compare Call If Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "save_as_bf16", + "source_code": "@save_as_bf16.setter\ndef save_as_bf16(self, save_as_bf16):\n self._save_as_bf16 = save_as_bf16 and self.dtype == dtypes.float32", + "docstring": "Enables saving float32 as bfloat16.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\d_variable.py", + "ast_data": "FunctionDef name:save_as_bf16 arg:self arg:save_as_bf16 arguments arg arg Assign BoolOp Compare" + }, + { + "library": "pandas", + "name": "need_slice", + "source_code": "def need_slice(obj: slice) -> bool:\n return obj.start is not None or obj.stop is not None or (obj.step is not None and obj.step != 1)", + "docstring": "Returns ------- bool", + "type": "function", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:need_slice arg:obj arguments arg Return return:yes BoolOp Compare Compare BoolOp Compare Compare" + }, + { + "library": "tensorflow", + "name": "flush", + "source_code": "def flush(self):\n if not self._closed:\n self._flush_complete.clear()\n self._try_put(self._flush_sentinel)\n self._flush_complete.wait()\n if self._worker.failure_exc_info:\n self._internal_close()\n _, exception, _ = self._worker.failure_exc_info\n raise exception", + "docstring": "Flushes the event file to disk. Call this method to make sure that all pending events have been written to disk.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py", + "ast_data": "FunctionDef name:flush arg:self arguments arg If Call Call Call If Call Assign Raise" + }, + { + "library": "pytorch", + "name": "_set_thread_name", + "source_code": "def _set_thread_name(name: str) -> None:\n torch._C._set_thread_name(name)", + "docstring": "Set the name of the current thread. Args: name (str): Name of the current thread.", + "type": "function", + "file_path": "pytorch\\torch\\multiprocessing\\__init__.py", + "ast_data": "FunctionDef name:_set_thread_name arg:name arguments arg Call" + }, + { + "library": "pytorch", + "name": "add_edge", + "source_code": "def add_edge(self, u, v):\n self.add_node(u)\n self.add_node(v)\n self._succ[u][v] = True\n self._pred[v][u] = True", + "docstring": "Add an edge to graph between nodes `` will be created if they do not already exist.", + "type": "method", + "file_path": "pytorch\\torch\\package\\_digraph.py", + "ast_data": "FunctionDef name:add_edge arg:self arg:u arg:v arguments arg arg arg Call Call Assign Assign" + }, + { + "library": "pandas", + "name": "_is_shorthand_color", + "source_code": "def _is_shorthand_color(self, color_string: str) -> bool:\n code = color_string.lstrip('#')\n if len(code) == 3:\n return True\n elif len(code) == 6:\n return False\n else:\n raise ValueError(f'Unexpected color {color_string}')", + "docstring": "Check if color code is shorthand. 
#FFF is a shorthand as opposed to full #FFFFFF.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\excel.py", + "ast_data": "FunctionDef name:_is_shorthand_color arg:self arg:color_string arguments arg arg Assign Call If Compare Call Return return:yes If Compare Call Return return:yes Raise Call" + }, + { + "library": "tensorflow", + "name": "_clone_and_build_model", + "source_code": "def _clone_and_build_model(model, mode, inputs=None, targets=None):\n from tensorflow.python.keras import models\n cloned_model = models.clone_model(model, input_tensors=inputs)\n if isinstance(model.optimizer, optimizers.TFOptimizer):\n optimizer = model.optimizer\n else:\n optimizer_config = model.optimizer.get_config()\n optimizer = model.optimizer.__class__.from_config(optimizer_config)\n\n def _upcast_low_precision_outputs(output):\n if output.dtype == dtypes.bfloat16:\n return math_ops.cast(output, dtypes.float32)\n else:\n return output\n cloned_model.outputs = [_upcast_low_precision_outputs(o) for o in cloned_model.outputs]\n if isinstance(targets, tuple):\n targets = nest.flatten(targets)\n if mode == ModeKeys.PREDICT and inputs is not None:\n _custom_compile_for_predict(cloned_model)\n else:\n cloned_model.compile(optimizer, model.loss, metrics=metrics_module.clone_metrics(model._compile_metrics), loss_weights=model.loss_weights, sample_weight_mode=model.sample_weight_mode, weighted_metrics=metrics_module.clone_metrics(model._compile_weighted_metrics), target_tensors=targets)\n return cloned_model", + "docstring": "Clone and build the given keras_model.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py", + "ast_data": "FunctionDef name:_clone_and_build_model arg:model arg:mode arg:inputs arg:targets arguments arg arg arg arg Assign Call If Call Assign Assign Call Assign Call FunctionDef name:_upcast_low_precision_outputs arg:output arguments arg If Compare Return return:yes Call Return return:yes Assign Call If Call Assign Call If BoolOp Compare Compare Call Call Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, rgbs: Tensor, densities: Tensor, points_3d: Tensor) -> Tensor:\n t_vals = calc_ray_t_vals(points_3d)\n deltas = t_vals[..., 1:] - t_vals[..., :-1]\n far = torch.empty(size=t_vals.shape[:-1], dtype=t_vals.dtype, device=t_vals.device).fill_(self._huge)\n deltas = torch.cat([deltas, far[..., None]], dim=-1)\n alpha = 1 - torch.exp(-1.0 * densities * deltas[..., None])\n return self._render(alpha, rgbs)", + "docstring": "Render 3D irregularly sampled points along rays. 
Args: rgbs: RGB values of points along rays :math: densities: Volume densities of points along rays :math: points_3d: 3D points along rays :math: Returns: Rendered RGB values for each ray :math:", + "type": "method", + "file_path": "kornia\\kornia\\nerf\\volume_renderer.py", + "ast_data": "FunctionDef name:forward arg:self arg:rgbs arg:densities arg:points_3d arguments arg arg arg arg Assign Call Assign Assign Call Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "deconv_output_length", + "source_code": "def deconv_output_length(input_length, filter_size, padding, output_padding=None, stride=0, dilation=1):\n assert padding in {'same', 'valid', 'full'}\n if input_length is None:\n return None\n filter_size = filter_size + (filter_size - 1) * (dilation - 1)\n if output_padding is None:\n if padding == 'valid':\n length = input_length * stride + max(filter_size - stride, 0)\n elif padding == 'full':\n length = input_length * stride - (stride + filter_size - 2)\n elif padding == 'same':\n length = input_length * stride\n else:\n if padding == 'same':\n pad = filter_size // 2\n elif padding == 'valid':\n pad = 0\n elif padding == 'full':\n pad = filter_size - 1\n length = (input_length - 1) * stride + filter_size - 2 * pad + output_padding\n return length", + "docstring": "Determines output length of a transposed convolution given input length. Args: input_length: Integer. filter_size: Integer. padding: one of , , . output_padding: Integer, amount of padding along the output dimension. Can be set to in which case the output length is inferred. stride: Integer. dilation: Integer. Returns: The output length (integer).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\conv_utils.py", + "ast_data": "FunctionDef name:deconv_output_length arg:input_length arg:filter_size arg:padding arg:output_padding arg:stride arg:dilation arguments arg arg arg arg arg arg Compare If Compare Return return:no Assign If Compare If Compare Assign Call If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Assign Return return:yes" + }, + { + "library": "numpy", + "name": "_check_libs", + "source_code": "def _check_libs(self, lib_dirs, libs, opt_libs, exts):\n if not is_sequence(lib_dirs):\n lib_dirs = [lib_dirs]\n found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts)\n if len(found_libs) > 0 and len(found_libs) == len(libs):\n opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts)\n found_libs.extend(opt_found_libs)\n for lib_dir in opt_found_dirs:\n if lib_dir not in found_dirs:\n found_dirs.append(lib_dir)\n info = {'libraries': found_libs, 'library_dirs': found_dirs}\n return info\n else:\n return None", + "docstring": "Find mandatory and optional libs in expected paths. 
Missing optional libraries are silently forgotten.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\system_info.py", + "ast_data": "FunctionDef name:_check_libs arg:self arg:lib_dirs arg:libs arg:opt_libs arg:exts arguments arg arg arg arg arg If Call Assign Assign Call If BoolOp Compare Call Compare Call Call Assign Call Call For If Compare Call Assign Return return:yes Return return:no" + }, + { + "library": "tensorflow", + "name": "_SummaryContextManager", + "source_code": "class _SummaryContextManager:\n\n def __init__(self, writer, step=None):\n self._writer = writer\n self._step = step\n self._old_writer = None\n self._old_step = None\n\n def __enter__(self):\n self._old_writer = _summary_state.writer\n _summary_state.writer = self._writer\n if self._step is not None:\n self._old_step = _summary_state.step\n _summary_state.step = self._step\n return self._writer\n\n def __exit__(self, *exc):\n _summary_state.writer.flush()\n _summary_state.writer = self._old_writer\n if self._step is not None:\n _summary_state.step = self._old_step\n return False", + "docstring": "Context manager to implement SummaryWriter.as_default().", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", + "ast_data": "ClassDef name:_SummaryContextManager FunctionDef name:__init__ arg:self arg:writer arg:step arguments arg arg arg Assign Assign Assign Assign FunctionDef name:__enter__ arg:self arguments arg Assign Assign If Compare Assign Assign Return return:yes FunctionDef name:__exit__ arg:self arguments arg arg Call Assign If Compare Assign Return return:yes" + }, + { + "library": "scikit-learn", + "name": "make_friedman3", + "source_code": "@validate_params({'n_samples': [Interval(Integral, 1, None, closed='left')], 'noise': [Interval(Real, 0, None, closed='left')], 'random_state': ['random_state']}, prefer_skip_nested_validation=True)\ndef make_friedman3(n_samples=100, *, noise=0.0, random_state=None):\n generator = check_random_state(random_state)\n X = generator.uniform(size=(n_samples, 4))\n X[:, 0] *= 100\n X[:, 1] *= 520 * np.pi\n X[:, 1] += 40 * np.pi\n X[:, 3] *= 10\n X[:, 3] += 1\n y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) + noise * generator.standard_normal(size=n_samples)\n return (X, y)", + "docstring": "Generate the \"Friedman #3\" regression problem. This dataset is described in Friedman [1] and Breiman [2]. Inputs are 4 independent features uniformly distributed on the intervals:: 0 Glossary `. Returns ------- X : ndarray of shape (n_samples, 4) The input samples. y : ndarray of shape (n_samples,) The output values. References ---------- .. [1] J. Friedman, \"Multivariate adaptive regression splines\", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, \"Bagging predictors\", Machine Learning 24, pages 123-140, 1996. 
Examples -------- >>> from sklearn.datasets import make_friedman3 >>> X, y = make_friedman3(random_state=42) >>> X.shape (100, 4) >>> y.shape (100,) >>> list(y[:3]) [np.float64(1.54), np.float64(0.956), np.float64(0.414)]", + "type": "function", + "file_path": "scikit-learn\\sklearn\\datasets\\_samples_generator.py", + "ast_data": "FunctionDef name:make_friedman3 arg:n_samples arguments arg arg arg Assign Call Assign Call Assign Call Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_flat_types", + "source_code": "@property\ndef _flat_types(self):\n return structure.get_flat_tensor_types(self.element_spec)", + "docstring": "Returns a list s for the element tensor representation. Returns: A list s for the element tensor representation.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:_flat_types arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "matches_patterns", + "source_code": "def matches_patterns(path, patterns):\n return any((fnmatch.fnmatchcase(path, pattern) for pattern in patterns))", + "docstring": "Return True or False depending on whether the ``).", + "type": "function", + "file_path": "django\\django\\contrib\\staticfiles\\utils.py", + "ast_data": "FunctionDef name:matches_patterns arg:path arg:patterns arguments arg arg Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "harden_mask", + "source_code": "def harden_mask(self):\n self._hardmask = True\n return self", + "docstring": "Force the mask to hard, preventing unmasking by assignment. Whether the mask of a masked array is hard or soft is determined by its property. sets to `` (and returns the modified self). See Also -------- ma.MaskedArray.hardmask ma.MaskedArray.soften_mask", + "type": "method", + "file_path": "numpy\\numpy\\ma\\core.py", + "ast_data": "FunctionDef name:harden_mask arg:self arguments arg Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "get_mask", + "source_code": "def get_mask(self, name: Optional[str]=None, layer: Optional[nn.Module]=None):\n assert name is not None or layer is not None, 'Need at least name or layer obj to retrieve mask'\n if name is None:\n assert layer is not None\n name = module_to_fqn(self.model, layer)\n assert name is not None, 'layer not found in the specified model'\n if name not in self.state:\n raise ValueError('Error: layer with the given name not found')\n mask = self.state[name].get('mask', None)\n if mask is None:\n raise ValueError('Error: shape unknown, call layer() routine at least once to infer mask')\n return mask", + "docstring": "Returns mask associated to the layer. The mask is - a torch tensor is features for that layer is None. - a list of torch tensors for each feature, otherwise Note:: The shape of the mask is unknown until model.forward() is applied. 
Hence, if get_mask() is called before model.forward(), an error will be raised.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py", + "ast_data": "FunctionDef name:get_mask arg:self arg:name arg:layer arguments arg arg arg BoolOp Compare Compare If Compare Compare Assign Call Compare If Compare Raise Call Assign Call If Compare Raise Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_create_iterators_per_worker", + "source_code": "def _create_iterators_per_worker(worker_datasets, input_workers, options=None, canonicalize_devices=False):\n assert isinstance(input_workers, InputWorkers)\n assert len(worker_datasets) == len(input_workers.worker_devices)\n iterators = []\n for i, worker in enumerate(input_workers.worker_devices):\n with ops.device(worker):\n worker_devices = input_workers.compute_devices_for_worker(i)\n iterator = _SingleWorkerOwnedDatasetIterator(dataset=worker_datasets[i], worker=worker, devices=worker_devices, options=options, canonicalize_devices=canonicalize_devices)\n iterators.append(iterator)\n return iterators", + "docstring": "Create a multidevice iterator on each of the workers.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", + "ast_data": "FunctionDef name:_create_iterators_per_worker arg:worker_datasets arg:input_workers arg:options arg:canonicalize_devices arguments arg arg arg arg Call Compare Call Call Assign For Call With Call Assign Call Assign Call Call Return return:yes" + }, + { + "library": "django", + "name": "min", + "source_code": "@property\ndef min(self):\n return self.statistics()[0]", + "docstring": "Return the minimum pixel value for this band.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py", + "ast_data": "FunctionDef name:min arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_copy_pool", + "source_code": "def get_copy_pool():\n global _COPY_POOL\n if _COPY_POOL is None:\n _COPY_POOL = multiprocessing.pool.ThreadPool(_COPY_THREADS)\n atexit.register(_COPY_POOL.close)\n return _COPY_POOL", + "docstring": "Shared threadpool for copying arrays. Pool instantiation takes ~ 2ms, so a singleton pool is used rather than creating a pool per SliceAggregator. Returns: The global copy threadpool.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", + "ast_data": "FunctionDef name:get_copy_pool arguments If Compare Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_transform_feature", + "source_code": "@abc.abstractmethod\ndef _transform_feature(self, inputs):\n pass", + "docstring": "Returns intermediate representation (usually a ). Uses to create an intermediate representation (usually a ) that other feature columns can use. Example usage of : Let's say a Feature column depends on raw feature ('raw') and another (input_fc). To access corresponding s, inputs will be used as follows: Args: inputs: A object to access inputs. 
Returns: Transformed feature .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "FunctionDef name:_transform_feature arg:self arg:inputs arguments arg arg" + }, + { + "library": "cryptography", + "name": "curve", + "source_code": "@property\n@abc.abstractmethod\ndef curve(self) -> EllipticCurve:\n pass", + "docstring": "The EllipticCurve that this key is on.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ec.py", + "ast_data": "FunctionDef name:curve arg:self arguments arg" + }, + { + "library": "django", + "name": "postgis_version", + "source_code": "def postgis_version(self):\n return self._get_postgis_func('postgis_version')", + "docstring": "Return PostGIS version number and compile-time options.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py", + "ast_data": "FunctionDef name:postgis_version arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_number_of_non_param_args", + "source_code": "def get_number_of_non_param_args(node: Node, gm: GraphModule) -> int:\n if node.op == 'call_module':\n node_obj = getattr_from_fqn(gm, node.target)\n if isinstance(node_obj, nn.LSTM):\n return 2\n return 1", + "docstring": "Assumes that all non-param args occur first. Returns the number of non-param args expected for a node. For example, for F.linear(x, weight, bias) Returns 1, because x is a non-param arg and weight and bias are params. For lstm_mod(x, hid) Returns 2, because both x and hid are non-param args.", + "type": "function", + "file_path": "pytorch\\torch\\ao\\ns\\fx\\utils.py", + "ast_data": "FunctionDef name:get_number_of_non_param_args arg:node arg:gm arguments arg arg If Compare Assign Call If Call Return return:yes Return return:yes" + }, + { + "library": "sphinx", + "name": "add_domain", + "source_code": "def add_domain(self, domain: type[Domain], override: bool=False) -> None:\n self.registry.add_domain(domain, override=override)", + "docstring": "Register a domain. :param domain: A domain class :param override: If false, do not install it if another domain is already installed as the same name If true, unconditionally install the domain. .. versionadded:: 1.0 .. versionchanged:: 1.8 Add *override* keyword.", + "type": "method", + "file_path": "sphinx\\sphinx\\application.py", + "ast_data": "FunctionDef name:add_domain arg:self arg:domain arg:override arguments arg arg arg Call" + }, + { + "library": "sphinx", + "name": "_todim", + "source_code": "def _todim(val: int | str) -> str:\n if val is None:\n return 'initial'\n elif str(val).isdigit():\n return '0' if int(val) == 0 else '%spx' % val\n return val", + "docstring": "Make val a css dimension. In particular the following transformations are performed: - None -> 'initial' (default CSS value) - 0 -> '0' - ints and string representations of ints are interpreted as pixels. 
Everything else is returned unchanged.", + "type": "function", + "file_path": "sphinx\\sphinx\\jinja2glue.py", + "ast_data": "FunctionDef name:_todim arg:val arguments arg If Compare Return return:yes If Call Call Return return:yes Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "Iterable", + "source_code": "class Iterable(object):\n\n def __iter__(self):\n pass\n\n def reduce(self, initial_state, reduce_func):\n pass", + "docstring": "Interface for distributed objects that admit iteration/reduction.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py", + "ast_data": "ClassDef name:Iterable FunctionDef name:__iter__ arg:self arguments arg FunctionDef name:reduce arg:self arg:initial_state arg:reduce_func arguments arg arg arg" + }, + { + "library": "pandas", + "name": "column_types", + "source_code": "def column_types(self) -> np.ndarray:\n return np.asarray(self._column_types, dtype=np.dtype('S1'))", + "docstring": "Returns a numpy character array of the column types: s (string) or d (double)", + "type": "method", + "file_path": "pandas\\pandas\\io\\sas\\sas7bdat.py", + "ast_data": "FunctionDef name:column_types arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "cherrypy", + "name": "__len__", + "source_code": "def __len__(self):\n return len(self.cache)", + "docstring": "Return the number of active sessions.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", + "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "_compute_tri_eccentricities", + "source_code": "@staticmethod\ndef _compute_tri_eccentricities(tris_pts):\n a = np.expand_dims(tris_pts[:, 2, :] - tris_pts[:, 1, :], axis=2)\n b = np.expand_dims(tris_pts[:, 0, :] - tris_pts[:, 2, :], axis=2)\n c = np.expand_dims(tris_pts[:, 1, :] - tris_pts[:, 0, :], axis=2)\n dot_a = (_transpose_vectorized(a) @ a)[:, 0, 0]\n dot_b = (_transpose_vectorized(b) @ b)[:, 0, 0]\n dot_c = (_transpose_vectorized(c) @ c)[:, 0, 0]\n return _to_matrix_vectorized([[(dot_c - dot_b) / dot_a], [(dot_a - dot_c) / dot_b], [(dot_b - dot_a) / dot_c]])", + "docstring": "Compute triangle eccentricities. Parameters ---------- tris_pts : array like of dim 3 (shape: (nx, 3, 2)) Coordinates of the triangles apexes. Returns ------- array like of dim 2 (shape: (nx, 3)) The so-called eccentricity parameters [1] needed for HCT triangular element.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py", + "ast_data": "FunctionDef name:_compute_tri_eccentricities arg:tris_pts arguments arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "dump_ir", + "source_code": "def dump_ir(tensors, ir_format):\n if ir_format == 'text':\n return torch._C._lazy._get_tensors_text(tensors)\n elif ir_format == 'backend':\n return torch._C._lazy._get_tensors_backend(tensors)\n else:\n raise RuntimeError(f'Unrecognized IR format: {ir_format}')", + "docstring": "Return a dump of the tensors in the specified format. 
Valid format are - text: for LTC IR - backend: for the activate backend IR", + "type": "function", + "file_path": "pytorch\\torch\\_lazy\\debug.py", + "ast_data": "FunctionDef name:dump_ir arg:tensors arg:ir_format arguments arg arg If Compare Return return:yes Call If Compare Return return:yes Call Raise Call" + }, + { + "library": "matplotlib", + "name": "_check_1d", + "source_code": "def _check_1d(x):\n x = _unpack_to_numpy(x)\n if not hasattr(x, 'shape') or not hasattr(x, 'ndim') or len(x.shape) < 1:\n return np.atleast_1d(x)\n else:\n return x", + "docstring": "Convert scalars to 1D arrays; pass-through arrays as is.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:_check_1d arg:x arguments arg Assign Call If BoolOp Call Call Compare Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_prepare_standalone_module_fx", + "source_code": "def _prepare_standalone_module_fx(model: torch.nn.Module, qconfig_mapping: Union[QConfigMapping, dict[str, Any]], is_qat: bool, example_inputs: tuple[Any, ...], prepare_custom_config: Union[PrepareCustomConfig, dict[str, Any], None]=None, backend_config: Union[BackendConfig, dict[str, Any], None]=None) -> GraphModule:\n return _prepare_fx(model, qconfig_mapping, is_qat, example_inputs, prepare_custom_config, backend_config=backend_config, is_standalone_module=True)", + "docstring": "[Internal use only] Prepare a standalone module, so that it can be used when quantizing the parent module. standalone_module means it a submodule that is not inlined in parent module, and will be quantized separately as one unit. How the standalone module is observed is specified by and in the prepare_custom_config for the standalone module Returns: * model(GraphModule): prepared standalone module. It has these attributes in model.meta: * : a list of indexes for the graph input that is expected to be quantized, same as input_quantized_idxs configuration provided for the standalone module * : a list of indexs for the graph output that is quantized same as input_quantized_idxs configuration provided for the standalone module", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantize_fx.py", + "ast_data": "FunctionDef name:_prepare_standalone_module_fx arg:model arg:qconfig_mapping arg:is_qat arg:example_inputs arg:prepare_custom_config arg:backend_config arguments arg arg arg arg arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "max", + "source_code": "def max(self, *, skipna: bool=True, **kwargs):\n nv.validate_minmax_axis(kwargs.get('axis', 0))\n nv.validate_max((), kwargs)\n self.check_for_ordered('max')\n if not len(self._codes):\n return self.dtype.na_value\n good = self._codes != -1\n if not good.all():\n if skipna and good.any():\n pointer = self._codes[good].max()\n else:\n return np.nan\n else:\n pointer = self._codes.max()\n return self._wrap_reduction_result(None, pointer)", + "docstring": "The maximum value of the object. Only ordered have a maximum! Raises ------ TypeError If the is not . 
Returns ------- max : the maximum of this , NA if array is empty", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:max arg:self arguments arg arg arg Call Call Call Call If Call Return return:yes Assign Compare If Call If BoolOp Call Assign Call Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "_get_default_index_names", + "source_code": "def _get_default_index_names(self, names: Hashable | Sequence[Hashable] | None=None, default=None) -> list[Hashable]:\n from pandas.core.indexes.multi import MultiIndex\n if names is not None:\n if isinstance(names, (int, str)):\n names = [names]\n if not isinstance(names, list) and names is not None:\n raise ValueError('Index names must be str or 1-dimensional list')\n if not names:\n if isinstance(self, MultiIndex):\n names = com.fill_missing_names(self.names)\n else:\n names = [default] if self.name is None else [self.name]\n return names", + "docstring": "Get names of index. Parameters ---------- names : int, str or 1-dimensional list, default None Index names to set. default : str Default name of index. Raises ------ TypeError if names not str or list-like", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\base.py", + "ast_data": "FunctionDef name:_get_default_index_names arg:self arg:names arg:default arguments arg arg arg If Compare If Call Assign If BoolOp Call Compare Raise Call If If Call Assign Call Assign Compare Return return:yes" + }, + { + "library": "pandas", + "name": "loads", + "source_code": "def loads(bytes_object: bytes, *, fix_imports: bool=True, encoding: str='ASCII', errors: str='strict') -> Any:\n fd = io.BytesIO(bytes_object)\n return Unpickler(fd, fix_imports=fix_imports, encoding=encoding, errors=errors).load()", + "docstring": "Analogous to pickle._loads.", + "type": "function", + "file_path": "pandas\\pandas\\compat\\pickle_compat.py", + "ast_data": "FunctionDef name:loads arg:bytes_object arguments arg arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "InlineForeignKeyField", + "source_code": "class InlineForeignKeyField(Field):\n widget = HiddenInput\n default_error_messages = {'invalid_choice': _('The inline value did not match the parent instance.')}\n\n def __init__(self, parent_instance, *args, pk_field=False, to_field=None, **kwargs):\n self.parent_instance = parent_instance\n self.pk_field = pk_field\n self.to_field = to_field\n if self.parent_instance is not None:\n if self.to_field:\n kwargs['initial'] = getattr(self.parent_instance, self.to_field)\n else:\n kwargs['initial'] = self.parent_instance.pk\n kwargs['required'] = False\n super().__init__(*args, **kwargs)\n\n def clean(self, value):\n if value in self.empty_values:\n if self.pk_field:\n return None\n return self.parent_instance\n if self.to_field:\n orig = getattr(self.parent_instance, self.to_field)\n else:\n orig = self.parent_instance.pk\n if str(value) != str(orig):\n raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')\n return self.parent_instance\n\n def has_changed(self, initial, data):\n return False", + "docstring": "A basic integer field that deals with validating the given value to a given parent instance in an inline.", + "type": "class", + "file_path": "django\\django\\forms\\models.py", + "ast_data": "ClassDef name:InlineForeignKeyField Assign Assign Call FunctionDef name:__init__ arg:self arg:parent_instance arguments arg arg arg arg 
arg arg Assign Assign Assign If Compare If Assign Call Assign Assign Call Call FunctionDef name:clean arg:self arg:value arguments arg arg If Compare If Return return:no Return return:yes If Assign Call Assign If Compare Call Call Raise Call Return return:yes FunctionDef name:has_changed arg:self arg:initial arg:data arguments arg arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "cpp_string", + "source_code": "def cpp_string(s: str) -> str:\n s = s.replace('\\\\', '\\\\\\\\')\n s = s.replace('\"', '\\\\\"')\n s = s.replace('\\x07', '\\\\a')\n s = s.replace('\\x08', '\\\\b')\n s = s.replace('\\x0c', '\\\\f')\n s = s.replace('\\n', '\\\\n')\n s = s.replace('\\x0b', '\\\\v')\n s = s.replace('\\t', '\\\\t')\n return f'\"{s}\"'", + "docstring": "Convert a python string into a c++ string literal", + "type": "function", + "file_path": "pytorch\\torchgen\\gen.py", + "ast_data": "FunctionDef name:cpp_string arg:s arguments arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_eager_fill", + "source_code": "def _eager_fill(dims, value, ctx):\n attr_t = value.dtype.as_datatype_enum\n dims = convert_to_eager_tensor(dims, ctx, dtypes.int32)\n inputs_flat = [dims, value]\n attrs = ('T', attr_t, 'index_type', types_pb2.DT_INT32)\n [result] = execute.execute(b'Fill', 1, inputs=inputs_flat, attrs=attrs, ctx=ctx)\n return result", + "docstring": "Eager-only version of Fill op; requires value is an eager Tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\constant_op.py", + "ast_data": "FunctionDef name:_eager_fill arg:dims arg:value arg:ctx arguments arg arg arg Assign Assign Call Assign Assign Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "legval2d", + "source_code": "def legval2d(x, y, c):\n return pu._valnd(legval, c, x, y)", + "docstring": "Evaluate a 2-D Legendre series at points (x, y). This function returns the values: .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) The parameters and are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either and or their elements must support multiplication and addition both with themselves and with the elements of . If is a 1-D array a one is implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points `xyxycxy`. See Also -------- legval, leggrid2d, legval3d, leggrid3d", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\legendre.py", + "ast_data": "FunctionDef name:legval2d arg:x arg:y arg:c arguments arg arg arg Return return:yes Call" + }, + { + "library": "pygame", + "name": "layer", + "source_code": "@property\ndef layer(self):\n return self._layer", + "docstring": "Dynamic, read only property for protected _layer attribute. This will get the _layer variable if it exists. If you try to get it before it is set it will raise an attribute error. Layer property can only be set before the sprite is added to a group, after that it is read only and a sprite's layer in a group should be set via the group's change_layer() method. 
:return: layer as an int, or raise AttributeError.", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:layer arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "pose", + "source_code": "@property\ndef pose(self) -> Se2 | Se3:\n return self._dst_from_src", + "docstring": "Pose from source frame to destination frame .", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\pose.py", + "ast_data": "FunctionDef name:pose arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "celu", + "source_code": "def celu(input: Tensor, alpha: float=1.0, inplace: bool=False) -> Tensor:\n if has_torch_function_unary(input):\n return handle_torch_function(celu, (input,), input, alpha=alpha, inplace=inplace)\n if inplace:\n result = torch.celu_(input, alpha)\n else:\n result = torch.celu(input, alpha)\n return result", + "docstring": "celu(input, alpha=1., inplace=False) -> Tensor Applies element-wise, :math:. See :class: for more details.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:celu arg:input arg:alpha arg:inplace arguments arg arg arg If Call Return return:yes Call If Assign Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "__from_arrow__", + "source_code": "def __from_arrow__(self, array: pyarrow.Array | pyarrow.ChunkedArray) -> BooleanArray:\n import pyarrow\n if array.type != pyarrow.bool_() and (not pyarrow.types.is_null(array.type)):\n raise TypeError(f'Expected array of boolean type, got {array.type} instead')\n if isinstance(array, pyarrow.Array):\n chunks = [array]\n length = len(array)\n else:\n chunks = array.chunks\n length = array.length()\n if pyarrow.types.is_null(array.type):\n mask = np.ones(length, dtype=bool)\n data = np.empty(length, dtype=bool)\n return BooleanArray(data, mask)\n results = []\n for arr in chunks:\n buflist = arr.buffers()\n data = pyarrow.BooleanArray.from_buffers(arr.type, len(arr), [None, buflist[1]], offset=arr.offset).to_numpy(zero_copy_only=False)\n if arr.null_count != 0:\n mask = pyarrow.BooleanArray.from_buffers(arr.type, len(arr), [None, buflist[0]], offset=arr.offset).to_numpy(zero_copy_only=False)\n mask = ~mask\n else:\n mask = np.zeros(len(arr), dtype=bool)\n bool_arr = BooleanArray(data, mask)\n results.append(bool_arr)\n if not results:\n return BooleanArray(np.array([], dtype=np.bool_), np.array([], dtype=np.bool_))\n else:\n return BooleanArray._concat_same_type(results)", + "docstring": "Construct BooleanArray from pyarrow Array/ChunkedArray.", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\boolean.py", + "ast_data": "FunctionDef name:__from_arrow__ arg:self arg:array arguments arg arg If BoolOp Compare Call Call Raise Call If Call Assign Assign Call Assign Assign Call If Call Assign Call Assign Call Return return:yes Call Assign For Assign Call Assign Call Call Call If Compare Assign Call Call Call Assign Assign Call Call Assign Call Call If Return return:yes Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "ValueMutationExisting", + "source_code": "class ValueMutationExisting(MutationType):\n is_modified: bool\n\n def __init__(self, is_modified: bool=False):\n super().__init__(SourceType.Existing)\n self.is_modified = is_modified", + "docstring": "This case of VariableTracker.mutation_type marker indicates 1. Dynamo allows mutation on the value itself (rather than its attributes). 2. 
The value exists before Dynamo tracing started. For instance, Dynamo could model a pre-existing list with this marker, indicating that if we encounter mutations to this list, we need to buffer and re-apply those mutations after the graph runs, since the list might be used afterwards in Python.", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py", + "ast_data": "ClassDef name:ValueMutationExisting FunctionDef name:__init__ arg:self arg:is_modified arguments arg arg Call Call Assign" + }, + { + "library": "sphinx", + "name": "get_matching_files", + "source_code": "def get_matching_files(dirname: str | os.PathLike[str], include_patterns: Iterable[str]=('**',), exclude_patterns: Iterable[str]=()) -> Iterator[str]:\n dirname = Path(dirname).resolve()\n exclude_matchers = compile_matchers(exclude_patterns)\n include_matchers = compile_matchers(include_patterns)\n for root, dirs, files in os.walk(dirname, followlinks=True):\n relative_root = os.path.relpath(root, dirname)\n if relative_root == '.':\n relative_root = ''\n relative_root_path = Path(relative_root)\n included_files = []\n for entry in sorted(files):\n entry = _unicode_nfc((relative_root_path / entry).as_posix())\n keep = False\n for matcher in include_matchers:\n if matcher(entry):\n keep = True\n break\n for matcher in exclude_matchers:\n if matcher(entry):\n keep = False\n break\n if keep:\n included_files.append(entry)\n filtered_dirs = []\n for dir_name in sorted(dirs):\n normalised = _unicode_nfc((relative_root_path / dir_name).as_posix())\n for matcher in exclude_matchers:\n if matcher(normalised):\n break\n else:\n filtered_dirs.append(dir_name)\n dirs[:] = filtered_dirs\n yield from included_files", + "docstring": "Get all file names in a directory, recursively. Filter file names by the glob-style include_patterns and exclude_patterns. The default values include all files (\"**\") and exclude nothing (\"\"). Only files matching some pattern in *include_patterns* are included, and exclusions from *exclude_patterns* take priority over inclusions.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\matching.py", + "ast_data": "FunctionDef name:get_matching_files arg:dirname arg:include_patterns arg:exclude_patterns arguments arg arg arg Assign Call Call Assign Call Assign Call For Call Assign Call If Compare Assign Assign Call Assign For Call Assign Call Call Assign For If Call Assign For If Call Assign If Call Assign For Call Assign Call Call For If Call Call Assign" + }, + { + "library": "tensorflow", + "name": "get_barrier", + "source_code": "@tf_export('__internal__.distribute.multi_process_runner.get_barrier', v1=[])\ndef get_barrier():\n if _barrier is None:\n raise ValueError('barrier is not defined. It is likely because you are calling get_barrier() in the main process. get_barrier() can only be called in the subprocesses.')\n return _barrier", + "docstring": "Returns a for . returns a object which can be used within of to wait with call until all other tasks have also reached the call, before they can proceed individually. Note that all tasks (subprocesses) have to reach call to proceed. Currently it is not supported to block on only a subset of tasks in the cluster. 
Example: Returns: A for .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py", + "ast_data": "FunctionDef name:get_barrier arguments If Compare Raise Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "graph_dumper_aot", + "source_code": "def graph_dumper_aot(current_name, folder_name, dump_example_input=False):\n global graph_index\n graph_index = 0\n return partial(_save_fx_default, current_name, folder_name, dump_example_input)", + "docstring": "Dump the forward, backward, and joint computation graph. Example Usage: save_fx_func = graph_dumper_aot(current_name, folder_name, dump_example_input = False) optimize_ctx = torchdynamo.optimize( save_fx_func ) with torch.enable_grad(): with optimize_ctx: result = forward_and_backward_pass(model, example_inputs)", + "type": "function", + "file_path": "pytorch\\torch\\_functorch\\compilers.py", + "ast_data": "FunctionDef name:graph_dumper_aot arg:current_name arg:folder_name arg:dump_example_input arguments arg arg arg Assign Return return:yes Call" + }, + { + "library": "pytorch", + "name": "__str__", + "source_code": "def __str__(self) -> str:\n self.real_recompile()\n return super().__str__()", + "docstring": "str(GraphModule) will access the _code attribute. Make sure recompile happens so _code attribute is available.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\_lazy_graph_module.py", + "ast_data": "FunctionDef name:__str__ arg:self arguments arg Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_init_from_local_shards_and_global_metadata", + "source_code": "@classmethod\ndef _init_from_local_shards_and_global_metadata(cls, local_shards: list[Shard], sharded_tensor_metadata: ShardedTensorMetadata, sharding_spec=None) -> ShardedTensorBase:\n shards_metadata = sharded_tensor_metadata.shards_metadata\n tensor_properties = sharded_tensor_metadata.tensor_properties\n if len(shards_metadata) == 0:\n raise ValueError('shards_metadata must not be empty!')\n if tensor_properties.layout != torch.strided:\n raise ValueError('Only torch.strided layout is currently supported')\n if sharding_spec is None:\n spec = shard_spec._infer_sharding_spec_from_shards_metadata(shards_metadata)\n else:\n spec = sharding_spec\n sharded_tensor_base = ShardedTensorBase.__new__(ShardedTensor, spec, sharded_tensor_metadata.size, dtype=tensor_properties.dtype, layout=tensor_properties.layout, pin_memory=tensor_properties.pin_memory, requires_grad=tensor_properties.requires_grad)\n validate_non_overlapping_shards_metadata(shards_metadata)\n check_tensor(shards_metadata, list(sharded_tensor_metadata.size))\n sharded_tensor_base._local_shards = local_shards\n return sharded_tensor_base", + "docstring": "Initialize a ShardedTensorBase with local shards and a global ShardedTensorMetadata built on each rank. Warning: This API is experimental and subject to change. 
It does not do cross rank validations, and fully rely on the user for the correctness of sharded_tensor_metadata on each rank", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py", + "ast_data": "FunctionDef name:_init_from_local_shards_and_global_metadata arg:cls arg:local_shards arg:sharded_tensor_metadata arg:sharding_spec arguments arg arg arg arg Assign Assign If Compare Call Raise Call If Compare Raise Call If Compare Assign Call Assign Assign Call Call Call Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_tensor_from_node", + "source_code": "def get_tensor_from_node(node):\n with ops.init_scope():\n if getattr(node, 'is_distributed_variable', False):\n return node\n elif getattr(node, 'is_distributed_table', False):\n return node\n elif getattr(node, 'is_sharded_variable', False):\n return node\n elif resource_variable_ops.is_resource_variable(node):\n return node.handle\n elif isinstance(node, asset.Asset):\n return node.asset_path\n elif tensor_util.is_tf_type(node):\n return node\n elif isinstance(node, resource.CapturableResource):\n return node.resource_handle\n raise ValueError(f'Cannot convert node {node} to tensor.')", + "docstring": "Resolves a saved model graph node into a tensor to be captured. Args: node: a tensor, variable, or resource to be resolved into a capturable tensor Returns: A list of tensors. Raises: ValueError: if the node cannot be converted into a tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\restore_captures.py", + "ast_data": "FunctionDef name:get_tensor_from_node arg:node arguments arg With Call If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes If Call Return return:yes Raise Call" + }, + { + "library": "pytorch", + "name": "_get_scope_name", + "source_code": "def _get_scope_name(scoped_name: str) -> tuple[str, str]:\n if '.' in scoped_name:\n scope, name = scoped_name.rsplit('.', 1)\n else:\n scope, name = ('', scoped_name)\n return (scope, name)", + "docstring": "Get the scope and name of a node. Examples:: >>> _get_scope_name('') ('', '') >>> _get_scope_name('true_graph') ('', 'true_graph') >>> _get_scope_name('true_graph.false_graph') ('true_graph', 'false_graph') >>> _get_scope_name('true_graph.false_graph.some_graph') ('true_graph.false_graph', 'some_graph') Args: scoped_name: The scoped name of the node. 
Returns: (scope, name)", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_core.py", + "ast_data": "FunctionDef name:_get_scope_name arg:scoped_name arguments arg If Compare Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "concat", + "source_code": "def concat(self, name=None):\n value, _ = gen_data_flow_ops.tensor_array_concat_v3(handle=self._handle, flow_in=self._flow, dtype=self._dtype, name=name, element_shape_except0=self.element_shape[1:])\n if self.element_shape:\n dim0 = None\n if self._infer_shape:\n size = tensor_util.constant_value(self.size())\n if size is not None and self.element_shape[0] is not None:\n dim0 = size * self.element_shape[0]\n value.set_shape([dim0] + self.element_shape.dims[1:])\n return value", + "docstring": "See TensorArray.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:concat arg:self arg:name arguments arg arg Assign Call If Assign If Assign Call Call If BoolOp Compare Compare Assign Call Return return:yes" + }, + { + "library": "authlib", + "name": "prepare_grant_uri", + "source_code": "def prepare_grant_uri(uri, client_id, response_type, redirect_uri=None, scope=None, state=None, **kwargs):\n params = [('response_type', response_type), ('client_id', client_id)]\n if redirect_uri:\n params.append(('redirect_uri', redirect_uri))\n if scope:\n params.append(('scope', list_to_scope(scope)))\n if state:\n params.append(('state', state))\n for k in kwargs:\n if kwargs[k] is not None:\n params.append((to_unicode(k), kwargs[k]))\n return add_params_to_uri(uri, params)", + "docstring": "Prepare the authorization grant request URI. The client constructs the request URI by adding the following parameters to the query component of the authorization endpoint URI using the `Section 2.2Section 3.1.2Section 3.3Section 10.12Section 2.2Section 3.1.2Section 3.3section 10.12`:", + "type": "function", + "file_path": "authlib\\authlib\\oauth2\\rfc6749\\parameters.py", + "ast_data": "FunctionDef name:prepare_grant_uri arg:uri arg:client_id arg:response_type arg:redirect_uri arg:scope arg:state arguments arg arg arg arg arg arg arg Assign If Call If Call Call If Call For If Compare Call Call Return return:yes Call" + }, + { + "library": "pandas", + "name": "all", + "source_code": "def all(self, *, skipna: bool=True, **kwargs) -> bool | NAType:\n return self._reduce('all', skipna=skipna, **kwargs)", + "docstring": "Return whether all elements are truthy. Returns True unless there is at least one element that is falsey. By default, NAs are skipped. If `Kleene logic skipnaskipnapandas.NA` is True or False influences the result): >>> pd.array([True, True, pd.NA], dtype=\"boolean[pyarrow]\").all(skipna=False) >>> pd.array([1, 1, pd.NA], dtype=\"boolean[pyarrow]\").all(skipna=False) >>> pd.array([True, False, pd.NA], dtype=\"boolean[pyarrow]\").all(skipna=False) False >>> pd.array([1, 0, pd.NA], dtype=\"boolean[pyarrow]\").all(skipna=False) False", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py", + "ast_data": "FunctionDef name:all arg:self arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "append", + "source_code": "def append(self, module: Module) -> Self:\n self.add_module(str(len(self)), module)\n return self", + "docstring": "Append a given module to the end of the list. 
Args: module (nn.Module): module to append", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\container.py", + "ast_data": "FunctionDef name:append arg:self arg:module arguments arg arg Call Call Call Return return:yes" + }, + { + "library": "sphinx", + "name": "Highlight", + "source_code": "class Highlight(SphinxDirective):\n has_content = False\n required_arguments = 1\n optional_arguments = 0\n final_argument_whitespace = False\n option_spec: ClassVar[OptionSpec] = {'force': directives.flag, 'linenothreshold': directives.positive_int}\n\n def run(self) -> list[Node]:\n language = self.arguments[0].strip()\n linenothreshold = self.options.get('linenothreshold', sys.maxsize)\n force = 'force' in self.options\n self.env.current_document.highlight_language = language\n return [addnodes.highlightlang(lang=language, force=force, linenothreshold=linenothreshold)]", + "docstring": "Directive to set the highlighting language for code blocks, as well as the threshold for line numbers.", + "type": "class", + "file_path": "sphinx\\sphinx\\directives\\code.py", + "ast_data": "ClassDef name:Highlight Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Assign Call Assign Call Assign Compare Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_known_len_tf_for_stmt", + "source_code": "def _known_len_tf_for_stmt(iter_, extra_test, body, get_state, set_state, symbol_names, opts):\n n = py_builtins.len_(iter_)\n ta = tensor_array_ops.TensorArray(iter_.dtype, size=n)\n iter_ = ta.unstack(iter_)\n iterate_index = 0\n\n def aug_get_state():\n return (iterate_index,) + get_state()\n\n def aug_set_state(aug_loop_vars):\n nonlocal iterate_index\n iterate_index, *loop_vars = aug_loop_vars\n set_state(loop_vars)\n\n def aug_body():\n nonlocal iterate_index\n body(iter_.read(iterate_index))\n iterate_index += 1\n\n def aug_test():\n main_test = iterate_index < n\n if extra_test is not None:\n return tf_cond.cond(main_test, extra_test, lambda: False)\n return main_test\n _add_max_iterations_hint(opts, n)\n _tf_while_stmt(aug_test, aug_body, aug_get_state, aug_set_state, ('',) + symbol_names, opts)", + "docstring": "Overload of for_stmt that iterates over TF entities that admit a length.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py", + "ast_data": "FunctionDef name:_known_len_tf_for_stmt arg:iter_ arg:extra_test arg:body arg:get_state arg:set_state arg:symbol_names arg:opts arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign FunctionDef name:aug_get_state arguments Return return:yes Call FunctionDef name:aug_set_state arg:aug_loop_vars arguments arg Assign Call FunctionDef name:aug_body arguments Call Call FunctionDef name:aug_test arguments Assign Compare If Compare Return return:yes Call arguments Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_get_unflat_views_aligned", + "source_code": "@no_type_check\ndef _get_unflat_views_aligned(self, tensor: Optional[Tensor]=None) -> list[Tensor]:\n flat_param = self.flat_param\n if tensor is None:\n tensor = flat_param\n splits: list[Tensor] = torch.split(tensor, flat_param._numels_with_padding, dim=0)\n idx = 0\n views: list[Tensor] = []\n for split, is_padding in zip(splits, flat_param._is_padding_mask):\n if is_padding:\n continue\n views.append(_ext_post_unflatten_transform(split.view(flat_param._shapes[idx]) if flat_param._contiguities[idx] else split.as_strided(flat_param._shapes[idx], 
flat_param._strides[idx]), flat_param._param_extensions[idx], self._fsdp_extension))\n idx += 1\n return views", + "docstring": "Return unflattened `_get_unflat_views_unaligned` placeholders representing padding for alignment, which may incur slightly more CPU overhead.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", + "ast_data": "FunctionDef name:_get_unflat_views_aligned arg:self arg:tensor arguments arg arg Assign If Compare Assign Call Assign For Call If Call Call Call Call Return return:yes" + }, + { + "library": "authlib", + "name": "validate_at_hash", + "source_code": "def validate_at_hash(self):\n access_token = self.params.get('access_token')\n at_hash = self.get('at_hash')\n if at_hash and access_token:\n if not _verify_hash(at_hash, access_token, self.header['alg']):\n raise InvalidClaimError('at_hash')", + "docstring": "OPTIONAL. Access Token hash value. Its value is the base64url encoding of the left-most half of the hash of the octets of the ASCII representation of the access_token value, where the hash algorithm used is the hash algorithm used in the alg Header Parameter of the ID Token's JOSE Header. For instance, if the alg is RS256, hash the access_token value with SHA-256, then take the left-most 128 bits and base64url encode them. The at_hash value is a case sensitive string.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\core\\claims.py", + "ast_data": "FunctionDef name:validate_at_hash arg:self arguments arg Assign Call Assign Call If BoolOp If Call Raise Call" + }, + { + "library": "pytorch", + "name": "_identify_inputs_as_arguments", + "source_code": "def _identify_inputs_as_arguments(self, entry):\n arguments: set[str] = set()\n for block in entry.blocks():\n for block_node in block.nodes():\n for block_node_in in block_node.inputs():\n if block_node_in.debugName() in self.name_to_node and block_node_in.debugName() not in self.name_to_attribute_fqn:\n arguments.add(block_node_in.debugName())\n arguments = arguments.union(self._identify_inputs_as_arguments(block_node))\n return arguments", + "docstring": "Identify inputs from the innermost sub-block. This is needed for nested sub-blocks when the input is hidden in the nested sub-block. E.g., example IR of input is hidden in the nested sub-block. Graph[x.1] %1 = ... 
Block[] Block[x.1] %2 = x.1 ...", + "type": "method", + "file_path": "pytorch\\torch\\_export\\converter.py", + "ast_data": "FunctionDef name:_identify_inputs_as_arguments arg:self arg:entry arguments arg arg Call For Call For Call For Call If BoolOp Compare Call Compare Call Call Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_step_fn", + "source_code": "def _step_fn(ctx, inputs):\n if isinstance(inputs, (tuple, list)) and len(inputs) == 2:\n inputs, targets = inputs\n else:\n targets = None\n if isinstance(inputs, dict):\n inputs = [inputs[input_name] for input_name in model._feed_input_names]\n _build_model(strategy, model, mode, inputs, targets)\n grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args = strategy.extended.call_for_each_replica(_per_replica_execution_function, args=(dist_utils.get_distributed_model(model, mode), mode))\n all_inputs, all_outputs, all_updates, all_session_args = dist_utils.unwrap_values(strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args)\n combined_fn = backend.function(all_inputs, all_outputs, updates=all_updates, name='distributed_' + str(mode) + '_function', **all_session_args)\n for label, output in zip(output_labels, combined_fn.outputs):\n if label == 'loss':\n reduce_op = ds_reduce_util.ReduceOp.SUM\n else:\n reduce_op = ds_reduce_util.ReduceOp.MEAN\n ctx.set_last_step_output(label, output, reduce_op)\n return combined_fn.updates_op", + "docstring": "A step fn that returns update ops.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_distributed_v1.py", + "ast_data": "FunctionDef name:_step_fn arg:ctx arg:inputs arguments arg arg If BoolOp Call Compare Call Assign Assign If Call Assign Call Assign Call Call Assign Call Assign Call Call For Call If Compare Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "WatchOptions", + "source_code": "class WatchOptions:\n\n def __init__(self, debug_ops=None, node_name_regex_allowlist=None, op_type_regex_allowlist=None, tensor_dtype_regex_allowlist=None, tolerate_debug_op_creation_failures=False):\n if debug_ops:\n self.debug_ops = debug_ops\n else:\n self.debug_ops = ['DebugIdentity']\n self.node_name_regex_allowlist = node_name_regex_allowlist\n self.op_type_regex_allowlist = op_type_regex_allowlist\n self.tensor_dtype_regex_allowlist = tensor_dtype_regex_allowlist\n self.tolerate_debug_op_creation_failures = tolerate_debug_op_creation_failures\n\n def __repr__(self):\n return 'WatchOptions(debug_ops=%r, node_name_regex_allowlist=%r, op_type_regex_allowlist=%r, tensor_dtype_regex_allowlist=%r, tolerate_debug_op_creation_failures=%r)' % (self.debug_ops, self.node_name_regex_allowlist, self.op_type_regex_allowlist, self.tensor_dtype_regex_allowlist, self.tolerate_debug_op_creation_failures)", + "docstring": "Type for return values of watch_fn.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", + "ast_data": "ClassDef name:WatchOptions FunctionDef name:__init__ arg:self arg:debug_ops arg:node_name_regex_allowlist arg:op_type_regex_allowlist arg:tensor_dtype_regex_allowlist arg:tolerate_debug_op_creation_failures arguments arg arg arg arg arg arg If Assign Assign Assign Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, forward, inverse):\n super().__init__()\n if 
callable(forward) and callable(inverse):\n self._forward = forward\n self._inverse = inverse\n else:\n raise ValueError('arguments to FuncTransform must be functions')", + "docstring": "Parameters ---------- forward : callable The forward function for the transform. This function must have an inverse and, for best behavior, be monotonic. It must have the signature:: def forward(values: array-like) -> array-like inverse : callable The inverse of the forward function. Signature as ``.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\scale.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:forward arg:inverse arguments arg arg arg Call Call If BoolOp Call Call Assign Assign Raise Call" + }, + { + "library": "pandas", + "name": "col_count", + "source_code": "@property\ndef col_count(self) -> int:\n return self.info.col_count", + "docstring": "Number of dataframe columns to be summarized.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\info.py", + "ast_data": "FunctionDef name:col_count arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "keras_tensor_to_placeholder", + "source_code": "def keras_tensor_to_placeholder(x):\n if isinstance(x, KerasTensor):\n return x._to_placeholder()\n else:\n return x", + "docstring": "Construct a graph placeholder to represent a KerasTensor when tracing.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py", + "ast_data": "FunctionDef name:keras_tensor_to_placeholder arg:x arguments arg If Call Return return:yes Call Return return:yes" + }, + { + "library": "pytorch", + "name": "BNReLU3d", + "source_code": "class BNReLU3d(_FusedModule):\n\n def __init__(self, batch_norm, relu):\n assert type_before_parametrizations(batch_norm) == BatchNorm3d and type_before_parametrizations(relu) == ReLU, f'Incorrect types for input modules{type_before_parametrizations(batch_norm)}{type_before_parametrizations(relu)}'\n super().__init__(batch_norm, relu)", + "docstring": "This is a sequential container which calls the BatchNorm 3d and ReLU modules. During quantization this will be replaced with the corresponding fused module.", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py", + "ast_data": "ClassDef name:BNReLU3d FunctionDef name:__init__ arg:self arg:batch_norm arg:relu arguments arg arg arg BoolOp Compare Call Compare Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_total_size", + "source_code": "def _total_size(shape_values):\n result = 1\n for val in shape_values:\n result *= val\n return result", + "docstring": "Given list of tensor shape values, returns total size. If shape_values contains tensor values (which are results of array_ops.shape), then it returns a scalar tensor. 
If not, it returns an integer.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py", + "ast_data": "FunctionDef name:_total_size arg:shape_values arguments arg Assign For Return return:yes" + }, + { + "library": "tensorflow", + "name": "identity", + "source_code": "def identity(self):\n flow = array_ops.identity(self._flow)\n return build_ta_with_new_flow(self, flow)", + "docstring": "See TensorArray.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", + "ast_data": "FunctionDef name:identity arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "dict_to_dataclass", + "source_code": "def dict_to_dataclass(dict_obj: Dict[str, Any], dataclass_type: Type[T]) -> T:\n if not isinstance(dict_obj, dict):\n raise TypeError('Input conf must be dict')\n if not is_dataclass(dataclass_type):\n raise TypeError('dataclass_type must be a dataclass')\n field_types: dict[str, Any] = {f.name: f.type for f in fields(dataclass_type)}\n constructor_args = {}\n for key, value in dict_obj.items():\n if key in field_types and is_dataclass(field_types[key]):\n constructor_args[key] = dict_to_dataclass(value, field_types[key])\n else:\n constructor_args[key] = value\n return dataclass_type(**constructor_args)", + "docstring": "Recursively convert dictionaries to dataclass instances.", + "type": "function", + "file_path": "kornia\\kornia\\utils\\helpers.py", + "ast_data": "FunctionDef name:dict_to_dataclass arg:dict_obj arg:dataclass_type arguments arg arg If Call Raise Call If Call Raise Call Call Assign For Call If BoolOp Compare Call Assign Call Assign Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "update_position", + "source_code": "def update_position(self, loc):\n self.tick1line.set_ydata((loc,))\n self.tick2line.set_ydata((loc,))\n self.gridline.set_ydata((loc,))\n self.label1.set_y(loc)\n self.label2.set_y(loc)\n self._loc = loc\n self.stale = True", + "docstring": "Set the location of tick in data coords with scalar *loc*.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axis.py", + "ast_data": "FunctionDef name:update_position arg:self arg:loc arguments arg arg Call Call Call Call Call Assign Assign" + }, + { + "library": "pandas", + "name": "_can_hold_na", + "source_code": "@final\n@cache_readonly\ndef _can_hold_na(self) -> bool:\n dtype = self.dtype\n if isinstance(dtype, np.dtype):\n return dtype.kind not in 'iub'\n return dtype._can_hold_na", + "docstring": "Can we store NA values in this Block?", + "type": "method", + "file_path": "pandas\\pandas\\core\\internals\\blocks.py", + "ast_data": "FunctionDef name:_can_hold_na arg:self arguments arg Assign If Call Return return:yes Compare Return return:yes" + }, + { + "library": "pandas", + "name": "freq", + "source_code": "@property\ndef freq(self) -> BaseOffset:\n return self._freq", + "docstring": "The frequency object of this PeriodDtype. The property returns the object that represents the frequency of the PeriodDtype. This frequency specifies the interval (e.g., daily, monthly, yearly) associated with the Period type. It is essential for operations that depend on time-based calculations within a period index or series. See Also -------- Period : Represents a period of time. PeriodIndex : Immutable ndarray holding ordinal values indicating regular periods. PeriodDtype : An ExtensionDtype for Period data. date_range : Return a fixed frequency range of dates. 
Examples -------- >>> dtype = pd.PeriodDtype(freq=\"D\") >>> dtype.freq", + "type": "method", + "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", + "ast_data": "FunctionDef name:freq arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_TransposeTridiagonalMatrix", + "source_code": "def _TransposeTridiagonalMatrix(diags):\n diag = diags[..., 1, :]\n if diags.shape.is_fully_defined():\n zeros = array_ops.zeros(list(diags.shape[:-2]) + [1], dtype=diags.dtype)\n superdiag = array_ops.concat((diags[..., 2, 1:], zeros), axis=-1)\n subdiag = array_ops.concat((zeros, diags[..., 0, :-1]), axis=-1)\n else:\n rank = array_ops.rank(diags)\n zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)\n superdiag_pad = array_ops.concat((zeros, array_ops.constant([[0, 1]])), axis=0)\n superdiag = array_ops.pad(diags[..., 2, 1:], superdiag_pad)\n subdiag_pad = array_ops.concat((zeros, array_ops.constant([[1, 0]])), axis=0)\n subdiag = array_ops.pad(diags[..., 0, :-1], subdiag_pad)\n return array_ops_stack.stack([superdiag, diag, subdiag], axis=-2)", + "docstring": "Transposes a tridiagonal matrix. Args: diags: the diagonals of the input matrix in the compact form (see linalg_ops.tridiagonal_solve). Returns: Diagonals of the transposed matrix in the compact form.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py", + "ast_data": "FunctionDef name:_TransposeTridiagonalMatrix arg:diags arguments arg Assign If Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Assign Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "bounds", + "source_code": "@property\ndef bounds(self):\n if self.change_dimensionality:\n return [self._bounds[0]] * self.N\n else:\n return self._bounds", + "docstring": "The lower/upper bounds to be used for minimizing the problem. This a list of (lower, upper) tuples that contain the lower and upper bounds for the problem. The problem should not be asked for evaluation outside these bounds. 
``.", + "type": "method", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_benchmark.py", + "ast_data": "FunctionDef name:bounds arg:self arguments arg If Return return:yes Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_subplot_params", + "source_code": "def get_subplot_params(self, figure=None):\n hspace = self._hspace if self._hspace is not None else figure.subplotpars.hspace if figure is not None else mpl.rcParams['figure.subplot.hspace']\n wspace = self._wspace if self._wspace is not None else figure.subplotpars.wspace if figure is not None else mpl.rcParams['figure.subplot.wspace']\n figbox = self._subplot_spec.get_position(figure)\n left, bottom, right, top = figbox.extents\n return SubplotParams(left=left, right=right, bottom=bottom, top=top, wspace=wspace, hspace=hspace)", + "docstring": "Return a dictionary of subplot layout parameters.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py", + "ast_data": "FunctionDef name:get_subplot_params arg:self arg:figure arguments arg arg Assign Compare Compare Assign Compare Compare Assign Call Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_ForwardBackwardCall", + "source_code": "class _ForwardBackwardCall(object):\n __slots__ = ['_functions', '_inference_args', '_input_tangents', '_tape_watching']\n\n def __init__(self, functions, inference_args, input_tangents, tape_watching):\n self._functions = functions\n self._inference_args = inference_args\n self._input_tangents = input_tangents\n self._tape_watching = tape_watching\n\n def forward(self):\n forward_function = self._functions.forward(self._inference_args, self._input_tangents)\n return (forward_function, self._inference_args + self._input_tangents)\n\n def record(self, flat_outputs):\n if self._tape_watching and (not isinstance(flat_outputs, ops.Operation)) and (flat_outputs is not None):\n self._functions.record(flat_outputs, self._inference_args, self._input_tangents)", + "docstring": "Holds the state of a function call between execution and recording.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", + "ast_data": "ClassDef name:_ForwardBackwardCall Assign FunctionDef name:__init__ arg:self arg:functions arg:inference_args arg:input_tangents arg:tape_watching arguments arg arg arg arg arg Assign Assign Assign Assign FunctionDef name:forward arg:self arguments arg Assign Call Return return:yes FunctionDef name:record arg:self arg:flat_outputs arguments arg arg If BoolOp Call Compare Call" + }, + { + "library": "scipy", + "name": "_support_mask", + "source_code": "def _support_mask(self, x):\n residual = np.linalg.norm(x @ self.V, axis=-1)\n in_support = residual < self.eps\n return in_support", + "docstring": "Check whether x lies in the support of the distribution.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:_support_mask arg:self arg:x arguments arg arg Assign Call Assign Compare Return return:yes" + }, + { + "library": "pandas", + "name": "abs", + "source_code": "@final\ndef abs(self) -> Self:\n res_mgr = self._mgr.apply(np.abs)\n return self._constructor_from_mgr(res_mgr, axes=res_mgr.axes).__finalize__(self, name='abs')", + "docstring": "Return a Series/DataFrame with absolute numeric value of each element. This function only applies to elements that are all numeric. 
Returns ------- abs Series/DataFrame containing the absolute value of each element. See Also -------- numpy.absolute : Calculate the absolute value element-wise. Notes ----- For `\\sqrt{ a^2 + b^2 }StackOverflow `__). >>> df = pd.DataFrame( ... {\"a\": [4, 5, 6, 7], \"b\": [10, 20, 30, 40], \"c\": [100, 50, -30, -50]} ... ) >>> df a b c 0 4 10 100 1 5 20 50 2 6 30 -30 3 7 40 -50 >>> df.loc[(df.c - 43).abs().argsort()] a b c 1 5 20 50 0 4 10 100 2 6 30 -30 3 7 40 -50", + "type": "method", + "file_path": "pandas\\pandas\\core\\generic.py", + "ast_data": "FunctionDef name:abs arg:self arguments arg Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_make_callable", + "source_code": "def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):\n callable_opts = config_pb2.CallableOptions()\n for x in feed_arrays:\n callable_opts.feed.append(x.name)\n if self.feed_dict:\n for key in sorted(self.feed_dict.keys()):\n callable_opts.feed.append(key.name)\n for x, y in zip(feed_symbols, symbol_vals):\n connection = callable_opts.tensor_connection.add()\n if x.dtype != y.dtype:\n y = math_ops.cast(y, dtype=x.dtype)\n from_tensor = _as_graph_element(y)\n if from_tensor is None:\n from_tensor = y\n connection.from_tensor = from_tensor.name\n connection.to_tensor = x.name\n for x in self.outputs + self.fetches:\n callable_opts.fetch.append(x.name)\n callable_opts.target.append(self.updates_op.name)\n if self.run_options:\n callable_opts.run_options.CopyFrom(self.run_options)\n callable_fn = session._make_callable_from_options(callable_opts)\n self._callable_fn = callable_fn\n self._feed_arrays = feed_arrays\n self._feed_symbols = feed_symbols\n self._symbol_vals = symbol_vals\n self._fetches = list(self.fetches)\n self._session = session", + "docstring": "Generates a callable that runs the graph. Args: feed_arrays: List of input tensors to be fed Numpy arrays at runtime. feed_symbols: List of input tensors to be fed symbolic tensors at runtime. symbol_vals: List of symbolic tensors to be fed to . session: Session to use to generate the callable. 
Returns: Function that runs the graph according to the above options.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", + "ast_data": "FunctionDef name:_make_callable arg:self arg:feed_arrays arg:feed_symbols arg:symbol_vals arg:session arguments arg arg arg arg arg Assign Call For Call If For Call Call Call For Call Assign Call If Compare Assign Call Assign Call If Compare Assign Assign Assign For Call Call If Call Assign Call Assign Assign Assign Assign Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "device_spec", + "source_code": "@property\ndef device_spec(self):\n return self._thread_local_data.device_spec", + "docstring": "Returns the device spec for the current thread.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:device_spec arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_compute_gradient", + "source_code": "def _compute_gradient(x, x_shape, dx, y, y_shape, dy, x_init_value=None, delta=0.001, extra_feed_dict=None):\n t = dtypes.as_dtype(x.dtype)\n allowed_types = [dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]\n assert t.base_dtype in allowed_types, \"Don't support type %s for x\" % t.name\n t2 = dtypes.as_dtype(y.dtype)\n assert t2.base_dtype in allowed_types, \"Don't support type %s for y\" % t2.name\n if x_init_value is not None:\n i_shape = list(x_init_value.shape)\n assert list(x_shape) == i_shape, 'x_shape = %s, init_data shape = %s' % (x_shape, i_shape)\n x_data = x_init_value\n else:\n x_data = np.random.random_sample(x_shape).astype(t.as_numpy_dtype)\n if t.is_complex:\n x_data.imag = np.random.random_sample(x_shape)\n jacob_t = _compute_theoretical_jacobian(x, x_shape, x_data, dy, y_shape, dx, extra_feed_dict=extra_feed_dict)\n jacob_n = _compute_numeric_jacobian(x, x_shape, x_data, y, y_shape, delta, extra_feed_dict=extra_feed_dict)\n return (jacob_t, jacob_n)", + "docstring": "Computes the theoretical and numerical jacobian.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\gradient_checker.py", + "ast_data": "FunctionDef name:_compute_gradient arg:x arg:x_shape arg:dx arg:y arg:y_shape arg:dy arg:x_init_value arg:delta arg:extra_feed_dict arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Compare Assign Call Compare If Compare Assign Call Compare Call Assign Assign Call Call If Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scrapy", + "name": "add_to_list", + "source_code": "def add_to_list(self, name: _SettingsKeyT, item: Any) -> None:\n value: list[str] = self.getlist(name)\n if item not in value:\n self.set(name, [*value, item], self.getpriority(name) or 0)", + "docstring": "Append *item* to the :class: setting with the specified *name* if *item* is not already in that list. This change is applied regardless of the priority of the *name* setting. 
The setting priority is not affected by this change either.", + "type": "method", + "file_path": "scrapy\\scrapy\\settings\\__init__.py", + "ast_data": "FunctionDef name:add_to_list arg:self arg:name arg:item arguments arg arg arg Call If Compare Call BoolOp Call" + }, + { + "library": "tensorflow", + "name": "cholesky_solve", + "source_code": "@tf_export('linalg.cholesky_solve', v1=['linalg.cholesky_solve', 'cholesky_solve'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('cholesky_solve')\ndef cholesky_solve(chol, rhs, name=None):\n with ops.name_scope(name, 'cholesky_solve', [chol, rhs]):\n y = gen_linalg_ops.matrix_triangular_solve(chol, rhs, adjoint=False, lower=True)\n x = gen_linalg_ops.matrix_triangular_solve(chol, y, adjoint=True, lower=True)\n return x", + "docstring": "Solves systems of linear eqns , given Cholesky factorizations. Specifically, returns from , where , is the arg and is the arg. Args: chol: A . Must be or , shape is . Cholesky factorization of , e.g. . For that reason, only the lower triangular parts (including the diagonal) of the last two dimensions of are used. The strictly upper part is assumed to be zero and not accessed. rhs: A , same type as , shape is . name: A name to give this . Defaults to . Returns: Solution to , shape .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_ops.py", + "ast_data": "FunctionDef name:cholesky_solve arg:chol arg:rhs arg:name arguments arg arg arg With Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "django", + "name": "LabelCommand", + "source_code": "class LabelCommand(BaseCommand):\n label = 'label'\n missing_args_message = 'Enter at least one %s.'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.missing_args_message == LabelCommand.missing_args_message:\n self.missing_args_message = self.missing_args_message % self.label\n\n def add_arguments(self, parser):\n parser.add_argument('args', metavar=self.label, nargs='+')\n\n def handle(self, *labels, **options):\n output = []\n for label in labels:\n label_output = self.handle_label(label, **options)\n if label_output:\n output.append(label_output)\n return '\\n'.join(output)\n\n def handle_label(self, label, **options):\n raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')", + "docstring": "A management command which takes one or more arbitrary arguments (labels) on the command line, and does something with each of them. 
Rather than implementing `` instead.", + "type": "class", + "file_path": "django\\django\\core\\management\\base.py", + "ast_data": "ClassDef name:LabelCommand Assign Assign FunctionDef name:__init__ arg:self arguments arg arg arg Call Call If Compare Assign FunctionDef name:add_arguments arg:self arg:parser arguments arg arg Call FunctionDef name:handle arg:self arguments arg arg arg Assign For Assign Call If Call Return return:yes Call FunctionDef name:handle_label arg:self arg:label arguments arg arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "OutputGraphGuardsState", + "source_code": "@dataclass\nclass OutputGraphGuardsState:\n local_scope: Scope\n global_scope: Scope\n torch_function_mode_stack: list[torch.overrides.TorchFunctionMode]\n guard_on_key_order: set[Source]\n input_source_to_sizes_strides: dict[Source, dict[str, Any]]\n dual_level: int\n functorch_layers: list[torch._functorch.pyfunctorch.FuncTorchInterpreter]\n current_device: Optional[torch.device]\n export: bool = False\n export_constraints: bool = False\n _guards: Optional[torch._guards.GuardsSet] = None\n _aotautograd_guards: Optional[list[torch._guards.GuardEnvExpr]] = None\n\n @property\n def shape_env(self):\n raise AssertionError(f\"shape_env shouldn't be accessed from {type(self)}\")\n\n @property\n def guards(self):\n return self._guards\n\n @property\n def aotautograd_guards(self):\n return self._aotautograd_guards", + "docstring": "A base class containing fields that are considered \"persistent\" when we want to save all the important state for reconstrucing guards in a different process. Normally we don't need to add states here, but we may have to when the information is needed to serialize the guards, so the fields here are supposed to be serializable as a requirement.", + "type": "class", + "file_path": "pytorch\\torch\\_dynamo\\output_graph.py", + "ast_data": "ClassDef name:OutputGraphGuardsState FunctionDef name:shape_env arg:self arguments arg Raise Call Call FunctionDef name:guards arg:self arguments arg Return return:yes FunctionDef name:aotautograd_guards arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "Min", + "source_code": "class Min(MinMaxBase, Application):\n zero = S.NegativeInfinity\n identity = S.Infinity\n\n def _eval_is_positive(self):\n return fuzzy_and((a.is_positive for a in self.args))\n\n def _eval_is_nonnegative(self):\n return fuzzy_and((a.is_nonnegative for a in self.args))\n\n def _eval_is_negative(self):\n return fuzzy_or((a.is_negative for a in self.args))", + "docstring": "Return, if possible, the minimum value of the list.", + "type": "class", + "file_path": "pytorch\\torch\\utils\\_sympy\\functions.py", + "ast_data": "ClassDef name:Min Assign Assign FunctionDef name:_eval_is_positive arg:self arguments arg Return return:yes Call FunctionDef name:_eval_is_nonnegative arg:self arguments arg Return return:yes Call FunctionDef name:_eval_is_negative arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_result_batchable_to_flat", + "source_code": "def _result_batchable_to_flat(result_batchable, result_flat_signature, batch_size):\n result_flat = []\n i = 0\n for spec in result_flat_signature:\n num_tensors = len(spec._flat_tensor_specs)\n result_flat.append(spec._batch(batch_size)._from_compatible_tensor_list(result_batchable[i:i + num_tensors]))\n i += num_tensors\n assert i == len(result_batchable)\n return result_flat", + "docstring": "Converts result_batchable -> result_flat.", + 
"type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\map_fn.py", + "ast_data": "FunctionDef name:_result_batchable_to_flat arg:result_batchable arg:result_flat_signature arg:batch_size arguments arg arg arg Assign Assign For Assign Call Call Call Call Compare Call Return return:yes" + }, + { + "library": "numpy", + "name": "_sub", + "source_code": "def _sub(c1, c2):\n [c1, c2] = as_series([c1, c2])\n if len(c1) > len(c2):\n c1[:c2.size] -= c2\n ret = c1\n else:\n c2 = -c2\n c2[:c1.size] += c1\n ret = c2\n return trimseq(ret)", + "docstring": "Helper function used to implement the `` functions.", + "type": "function", + "file_path": "numpy\\numpy\\polynomial\\polyutils.py", + "ast_data": "FunctionDef name:_sub arg:c1 arg:c2 arguments arg arg Assign Call If Compare Call Call Assign Assign Assign Return return:yes Call" + }, + { + "library": "scipy", + "name": "sum_duplicates", + "source_code": "def sum_duplicates(self) -> None:\n if self.has_canonical_format:\n return\n summed = self._sum_duplicates(self.coords, self.data)\n self.coords, self.data = summed\n self.has_canonical_format = True", + "docstring": "Eliminate duplicate entries by adding them together This is an *in place* operation", + "type": "method", + "file_path": "scipy\\scipy\\sparse\\_coo.py", + "ast_data": "FunctionDef name:sum_duplicates arg:self arguments arg If Return return:no Assign Call Assign Assign" + }, + { + "library": "authlib", + "name": "HMACAlgorithm", + "source_code": "class HMACAlgorithm(JWSAlgorithm):\n SHA256 = hashlib.sha256\n SHA384 = hashlib.sha384\n SHA512 = hashlib.sha512\n\n def __init__(self, sha_type):\n self.name = f'HS{sha_type}'\n self.description = f'HMAC using SHA-{sha_type}'\n self.hash_alg = getattr(self, f'SHA{sha_type}')\n\n def prepare_key(self, raw_data):\n return OctKey.import_key(raw_data)\n\n def sign(self, msg, key):\n op_key = key.get_op_key('sign')\n return hmac.new(op_key, msg, self.hash_alg).digest()\n\n def verify(self, msg, sig, key):\n op_key = key.get_op_key('verify')\n v_sig = hmac.new(op_key, msg, self.hash_alg).digest()\n return hmac.compare_digest(sig, v_sig)", + "docstring": "HMAC using SHA algorithms for JWS. Available algorithms: - HS256: HMAC using SHA-256 - HS384: HMAC using SHA-384 - HS512: HMAC using SHA-512", + "type": "class", + "file_path": "authlib\\authlib\\jose\\rfc7518\\jws_algs.py", + "ast_data": "ClassDef name:HMACAlgorithm Assign Assign Assign FunctionDef name:__init__ arg:self arg:sha_type arguments arg arg Assign Assign Assign Call FunctionDef name:prepare_key arg:self arg:raw_data arguments arg arg Return return:yes Call FunctionDef name:sign arg:self arg:msg arg:key arguments arg arg arg Assign Call Return return:yes Call Call FunctionDef name:verify arg:self arg:msg arg:sig arg:key arguments arg arg arg arg Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "eval", + "source_code": "def eval(self, session=None):\n raise NotImplementedError", + "docstring": "In a session, computes and returns the value of this variable. This is not a graph construction method, it does not add ops to the graph. This convenience method requires a session where the graph containing this variable has been launched. If no session is passed, the default session is used. See for more information on launching a graph and on sessions. Args: session: The session to use to evaluate this variable. If none, the default session is used. 
Returns: A numpy with a copy of the value of this variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:eval arg:self arg:session arguments arg arg Raise" + }, + { + "library": "scipy", + "name": "value", + "source_code": "def value(key: str) -> float:\n _check_obsolete(key)\n return physical_constants[key][0]", + "docstring": "Value in physical_constants indexed by key Parameters ---------- key : Python string Key in dictionary Returns ------- value : float Value in corresponding to Examples -------- >>> from scipy import constants >>> constants.value('elementary charge') 1.602176634e-19", + "type": "function", + "file_path": "scipy\\scipy\\constants\\_codata.py", + "ast_data": "FunctionDef name:value arg:key arguments arg Call Return return:yes" + }, + { + "library": "kornia", + "name": "__init__", + "source_code": "def __init__(self, rectified_left_camera: Tensor, rectified_right_camera: Tensor) -> None:\n self._check_stereo_camera(rectified_left_camera, rectified_right_camera)\n self.rectified_left_camera: Tensor = rectified_left_camera\n self.rectified_right_camera: Tensor = rectified_right_camera\n self.device = self.rectified_left_camera.device\n self.dtype = self.rectified_left_camera.dtype\n self._Q_matrix = self._init_Q_matrix()", + "docstring": "Class representing a horizontal stereo camera setup. Args: rectified_left_camera: The rectified left camera projection matrix of shape :math: rectified_right_camera: The rectified right camera projection matrix of shape :math:", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:rectified_left_camera arg:rectified_right_camera arguments arg arg arg Call Assign Assign Assign Call" + }, + { + "library": "scikit-learn", + "name": "_make_random_matrix", + "source_code": "@abstractmethod\ndef _make_random_matrix(self, n_components, n_features):\n pass", + "docstring": "Generate the random projection matrix. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. Returns ------- components : {ndarray, sparse matrix} of shape (n_components, n_features) The generated random matrix. 
Sparse matrix will be of CSR format.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\random_projection.py", + "ast_data": "FunctionDef name:_make_random_matrix arg:self arg:n_components arg:n_features arguments arg arg arg" + }, + { + "library": "seaborn", + "name": "_get_palette", + "source_code": "def _get_palette(self, data, hue, hue_order, palette):\n if hue is None:\n palette = color_palette(n_colors=1)\n else:\n hue_names = categorical_order(data[hue], hue_order)\n n_colors = len(hue_names)\n if palette is None:\n current_palette = utils.get_color_cycle()\n if n_colors > len(current_palette):\n colors = color_palette('husl', n_colors)\n else:\n colors = color_palette(n_colors=n_colors)\n elif isinstance(palette, dict):\n color_names = [palette[h] for h in hue_names]\n colors = color_palette(color_names, n_colors)\n else:\n colors = color_palette(palette, n_colors)\n palette = color_palette(colors, n_colors)\n return palette", + "docstring": "Get a list of colors for the hue variable.", + "type": "method", + "file_path": "seaborn\\seaborn\\axisgrid.py", + "ast_data": "FunctionDef name:_get_palette arg:self arg:data arg:hue arg:hue_order arg:palette arguments arg arg arg arg arg If Compare Assign Call Assign Call Assign Call If Compare Assign Call If Compare Call Assign Call Assign Call If Call Assign Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "get_model_perms", + "source_code": "def get_model_perms(self, request):\n return {'add': self.has_add_permission(request), 'change': self.has_change_permission(request), 'delete': self.has_delete_permission(request), 'view': self.has_view_permission(request)}", + "docstring": "Return a dict of all perms for this model. This dict has the keys `` mapping to the True/False for each of those actions.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:get_model_perms arg:self arg:request arguments arg arg Return return:yes Call Call Call Call" + }, + { + "library": "scipy", + "name": "Bohachevsky3", + "source_code": "class Bohachevsky3(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n self.global_optimum = [[0 for _ in range(self.N)]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n return x[0] ** 2 + 2 * x[1] ** 2 - 0.3 * cos(3 * pi * x[0] + 4 * pi * x[1]) + 0.3", + "docstring": "Bohachevsky 3 objective function. The Bohachevsky 3 [1]_ global optimization problem is a multimodal minimization problem defined as follows .. math:: f_{\\text{Bohachevsky}}(x) = \\sum_{i=1}^{n-1}\\left[x_i^2 + 2 x_{i+1}^2 - 0.3 \\cos(3 \\pi x_i) - 0.4 \\cos(4 \\pi x_{i + 1}) + 0.7 \\right] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: equation needs to be fixed up in the docstring. 
Jamil#19", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py", + "ast_data": "ClassDef name:Bohachevsky3 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "bootstrap_store_info", + "source_code": "@property\ndef bootstrap_store_info(self) -> Optional[RendezvousStoreInfo]:\n return self._bootstrap_store_info", + "docstring": "Store information that can used by trainer code to bootstrap distributed comms.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py", + "ast_data": "FunctionDef name:bootstrap_store_info arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "refine_heatmap", + "source_code": "def refine_heatmap(self, heatmap: Tensor, ratio: float=0.2, valid_thresh: float=0.01) -> Tensor:\n heatmap_values = heatmap[heatmap > valid_thresh]\n sorted_values = torch.sort(heatmap_values, descending=True)[0]\n top10_len = math.ceil(sorted_values.shape[0] * ratio)\n max20 = torch.mean(sorted_values[:top10_len])\n heatmap = torch.clamp(heatmap / max20, min=0.0, max=1.0)\n return heatmap", + "docstring": "Global heatmap refinement method.", + "type": "method", + "file_path": "kornia\\kornia\\feature\\sold2\\sold2_detector.py", + "ast_data": "FunctionDef name:refine_heatmap arg:self arg:heatmap arg:ratio arg:valid_thresh arguments arg arg arg arg Assign Compare Assign Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "__sizeof__", + "source_code": "def __sizeof__(self) -> int:\n memory_usage = getattr(self, 'memory_usage', None)\n if memory_usage:\n mem = memory_usage(deep=True)\n return int(mem if is_scalar(mem) else mem.sum())\n return super().__sizeof__()", + "docstring": "Generates the total memory usage for an object that returns either a value or Series of values", + "type": "method", + "file_path": "pandas\\pandas\\core\\base.py", + "ast_data": "FunctionDef name:__sizeof__ arg:self arguments arg Assign Call If Assign Call Return return:yes Call Call Call Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "tostring_argb", + "source_code": "def tostring_argb(self):\n return self.renderer.tostring_argb()", + "docstring": "Get the image as ARGB . 
must be called at least once before this function will work and to update the renderer for any subsequent changes to the Figure.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_agg.py", + "ast_data": "FunctionDef name:tostring_argb arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "variable_dtype", + "source_code": "@property\ndef variable_dtype(self):\n return self.dtype", + "docstring": "Alias of , the dtype of the weights.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", + "ast_data": "FunctionDef name:variable_dtype arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "shape_v2", + "source_code": "@dispatch.dispatch_for_api(array_ops.shape_v2)\ndef shape_v2(input: StructuredTensor, out_type=dtypes.int32, name=None) -> dynamic_ragged_shape.DynamicRaggedShape:\n del name\n return input._ragged_shape.with_dtype(out_type)", + "docstring": "Returns a DynamicRaggedShape containing the shape of the input.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", + "ast_data": "FunctionDef name:shape_v2 arg:input arg:out_type arg:name arguments arg arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "_run", + "source_code": "def _run(self):\n sequence = list(range(len(self.sequence)))\n self._send_sequence()\n while True:\n if self.shuffle:\n random.shuffle(sequence)\n with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:\n for i in sequence:\n if self.stop_signal.is_set():\n return\n self.queue.put(executor.apply_async(get_index, (self.uid, i)), block=True)\n self._wait_queue()\n if self.stop_signal.is_set():\n return\n self.sequence.on_epoch_end()\n self._send_sequence()", + "docstring": "Submits request to the executor and queue the objects.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py", + "ast_data": "FunctionDef name:_run arg:self arguments arg Assign Call Call Call Call While If Call With Call Call For If Call Return return:no Call Call Call If Call Return return:no Call Call" + }, + { + "library": "matplotlib", + "name": "_gen_axes_patch", + "source_code": "def _gen_axes_patch(self):\n return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)", + "docstring": "Returns ------- Patch The patch used to draw the background of the Axes. It is also used as the clipping path for any data elements on the Axes. In the standard Axes, this is a rectangle, but in other projections it may not be. 
Notes ----- Intended to be overridden by new projection types.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", + "ast_data": "FunctionDef name:_gen_axes_patch arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "execution_mode", + "source_code": "@property\ndef execution_mode(self):\n return ASYNC if self.is_async() else SYNC", + "docstring": "Gets execution mode for current thread.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", + "ast_data": "FunctionDef name:execution_mode arg:self arguments arg Return return:yes Call" + }, + { + "library": "django", + "name": "create", + "source_code": "@classmethod\ndef create(cls, children=None, connector=None, negated=False):\n obj = Node(children, connector or cls.default, negated)\n obj.__class__ = cls\n return obj", + "docstring": "Create a new instance using Node() instead of __init__() as some subclasses, e.g. django.db.models.query_utils.Q, may implement a custom __init__() with a signature that conflicts with the one defined in Node.__init__().", + "type": "method", + "file_path": "django\\django\\utils\\tree.py", + "ast_data": "FunctionDef name:create arg:cls arg:children arg:connector arg:negated arguments arg arg arg arg Assign Call BoolOp Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_process_v1_graph_mode_tensor", + "source_code": "def _process_v1_graph_mode_tensor(self, op_type, tensor, debug_tensor, tensor_debug_mode):\n if op_type in ('Placeholder', 'PlaceholderWithDefault'):\n self._placeholder_to_debug_tensor[tensor] = debug_tensor\n return tensor\n elif tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR and op_type != 'Const':\n self._tensor_aliases[debug_tensor.name] = tensor.name\n return debug_tensor\n else:\n with self._symbolic_tensor_counter_lock:\n identity_name = 'tfdbg_identity_%d' % self._symbolic_tensor_counter\n identity = array_ops.identity(tensor, name=identity_name)\n identity.op._add_control_input(debug_tensor.op)\n self._tensor_aliases[identity.name] = tensor.name\n return identity", + "docstring": "For V1 graph mode, determine what tensor to output from callback. Args: op_type: Type of the op that outputs the original symbolic tensor. tensor: The original output symbolic tensor. debug_tensor: The debugger-instrumented tensor. tensor_debug_mode: Debug mode used, a tfdbg TensorDebugMode enum. 
Returns: A symbolic tensor to be returned by the dumping op_callback.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py", + "ast_data": "FunctionDef name:_process_v1_graph_mode_tensor arg:self arg:op_type arg:tensor arg:debug_tensor arg:tensor_debug_mode arguments arg arg arg arg arg If Compare Assign Return return:yes If BoolOp Compare Compare Assign Return return:yes With Assign Assign Call Call Assign Return return:yes" + }, + { + "library": "pygame", + "name": "remove_sprites_of_layer", + "source_code": "def remove_sprites_of_layer(self, layer_nr):\n sprites = self.get_sprites_from_layer(layer_nr)\n self.remove(*sprites)\n return sprites", + "docstring": "remove all sprites from a layer and return them as a list LayeredUpdates.remove_sprites_of_layer(layer_nr): return sprites", + "type": "method", + "file_path": "pygame\\src_py\\sprite.py", + "ast_data": "FunctionDef name:remove_sprites_of_layer arg:self arg:layer_nr arguments arg arg Assign Call Call Return return:yes" + }, + { + "library": "numpy", + "name": "setdiff1d", + "source_code": "@array_function_dispatch(_setdiff1d_dispatcher)\ndef setdiff1d(ar1, ar2, assume_unique=False):\n if assume_unique:\n ar1 = np.asarray(ar1).ravel()\n else:\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n return ar1[_in1d(ar1, ar2, assume_unique=True, invert=True)]", + "docstring": "Find the set difference of two arrays. Return the unique values in that are not in . Parameters ---------- ar1 : array_like Input array. ar2 : array_like Input comparison array. assume_unique : bool If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False. Returns ------- setdiff1d : ndarray 1D array of values in that are not in . The result is sorted when , but otherwise only sorted if the input is sorted. Examples -------- >>> import numpy as np >>> a = np.array([1, 2, 3, 2, 4, 1]) >>> b = np.array([3, 4, 5, 6]) >>> np.setdiff1d(a, b) array([1, 2])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_arraysetops_impl.py", + "ast_data": "FunctionDef name:setdiff1d arg:ar1 arg:ar2 arg:assume_unique arguments arg arg arg If Assign Call Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "ModeKeyMap", + "source_code": "class ModeKeyMap(collections.abc.Mapping):\n\n def __init__(self, **kwargs):\n self._internal_dict = {}\n self._keys = []\n for key in kwargs:\n self._keys.append(key)\n dict_key = self._get_internal_key(key)\n if dict_key in self._internal_dict:\n raise ValueError('Error creating ModeKeyMap. Multiple keys/values found for {} mode.'.format(dict_key))\n self._internal_dict[dict_key] = kwargs[key]\n\n def _get_internal_key(self, key):\n if is_train(key):\n return KerasModeKeys.TRAIN\n if is_eval(key):\n return KerasModeKeys.TEST\n if is_predict(key):\n return KerasModeKeys.PREDICT\n raise ValueError('Invalid mode key: {}.'.format(key))\n\n def __getitem__(self, key):\n return self._internal_dict[self._get_internal_key(key)]\n\n def __iter__(self):\n return iter(self._keys)\n\n def __len__(self):\n return len(self._keys)", + "docstring": "Map using ModeKeys as keys. This class creates an immutable mapping from modes to values. 
For example, SavedModel export of Keras models use this to map modes to their corresponding MetaGraph tags/SignatureDef keys.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\mode_keys.py", + "ast_data": "ClassDef name:ModeKeyMap FunctionDef name:__init__ arg:self arguments arg arg Assign Assign For Call Assign Call If Compare Raise Call Call Assign FunctionDef name:_get_internal_key arg:self arg:key arguments arg arg If Call Return return:yes If Call Return return:yes If Call Return return:yes Raise Call Call FunctionDef name:__getitem__ arg:self arg:key arguments arg arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "MultiStepLR", + "source_code": "class MultiStepLR(LRScheduler):\n\n def __init__(self, optimizer: Optimizer, milestones: Iterable[int], gamma: float=0.1, last_epoch: int=-1) -> None:\n self.milestones = Counter(milestones)\n self.gamma = gamma\n super().__init__(optimizer, last_epoch)\n\n @override\n def get_lr(self) -> list[float]:\n _warn_get_lr_called_within_step(self)\n if self.last_epoch not in self.milestones:\n return [group['lr'] for group in self.optimizer.param_groups]\n return [group['lr'] * self.gamma ** self.milestones[self.last_epoch] for group in self.optimizer.param_groups]\n\n def _get_closed_form_lr(self):\n milestones = sorted(self.milestones.elements())\n return [base_lr * self.gamma ** bisect_right(milestones, self.last_epoch) for base_lr in self.base_lrs]", + "docstring": "Decays the learning rate of each parameter group by gamma once the number of epoch reaches one of the milestones. Notice that such decay can happen simultaneously with other changes to the learning rate from outside this scheduler. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. milestones (list): List of epoch indices. Must be increasing. gamma (float): Multiplicative factor of learning rate decay. Default: 0.1. last_epoch (int): The index of last epoch. Default: -1. Example: >>> # xdoctest: +SKIP >>> # Assuming optimizer uses lr = 0.05 for all groups >>> # lr = 0.05 if epoch >> # lr = 0.005 if 30 >> # lr = 0.0005 if epoch >= 80 >>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() .. 
image:: ../scripts/lr_scheduler_images/MultiStepLR.png", + "type": "class", + "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", + "ast_data": "ClassDef name:MultiStepLR FunctionDef name:__init__ arg:self arg:optimizer arg:milestones arg:gamma arg:last_epoch arguments arg arg arg arg arg Assign Call Assign Call Call FunctionDef name:get_lr arg:self arguments arg Call If Compare Return return:yes Return return:yes FunctionDef name:_get_closed_form_lr arg:self arguments arg Assign Call Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "depth_to_3d", + "source_code": "def depth_to_3d(depth: Tensor, camera_matrix: Tensor, normalize_points: bool=False) -> Tensor:\n KORNIA_CHECK_IS_TENSOR(depth)\n KORNIA_CHECK_IS_TENSOR(camera_matrix)\n KORNIA_CHECK_SHAPE(depth, ['B', '1', 'H', 'W'])\n KORNIA_CHECK_SHAPE(camera_matrix, ['B', '3', '3'])\n _, _, height, width = depth.shape\n points_2d: Tensor = create_meshgrid(height, width, normalized_coordinates=False)\n points_2d = points_2d.to(depth.device).to(depth.dtype)\n points_depth: Tensor = depth.permute(0, 2, 3, 1)\n camera_matrix_tmp: Tensor = camera_matrix[:, None, None]\n points_3d: Tensor = unproject_points(points_2d, points_depth, camera_matrix_tmp, normalize=normalize_points)\n return points_3d.permute(0, 3, 1, 2)", + "docstring": "Compute a 3d point per pixel given its depth value and the camera intrinsics. .. note:: This is an alternative implementation of that does not require the creation of a meshgrid. In future, we will support only this implementation. Args: depth: image tensor containing a depth value per pixel with shape :math:. camera_matrix: tensor containing the camera intrinsics with shape :math:. normalize_points: whether to normalise the pointcloud. This must be set to when the depth is represented as the Euclidean ray length from the camera position. Return: tensor with a 3d point per pixel of the same resolution as the input :math:. Example: >>> depth = torch.rand(1, 1, 4, 4) >>> K = torch.eye(3)[None] >>> depth_to_3d(depth, K).shape torch.Size([1, 3, 4, 4])", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\depth.py", + "ast_data": "FunctionDef name:depth_to_3d arg:depth arg:camera_matrix arg:normalize_points arguments arg arg arg Call Call Call Call Assign Call Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "remove_padding_from_sc", + "source_code": "def remove_padding_from_sc(value_in_checkpoint: tensor.Tensor, variable_shape: tuple[int, int]) -> tensor.Tensor:\n checkpoint_value_shape = value_in_checkpoint.shape.as_list()\n is_init_value_padded = all([i >= j for i, j in zip(checkpoint_value_shape, variable_shape)])\n if not is_init_value_padded:\n return value_in_checkpoint\n begin = [0] * len(checkpoint_value_shape)\n return array_ops.slice(value_in_checkpoint, begin=begin, size=variable_shape)", + "docstring": "Removes padding, if any, from sparsecore checkpoint. Args: value_in_checkpoint: input tensor value, usually from checkpoint. variable_shape: Expected shape of tensor after removing padding. 
Returns: A slice of the input tensor to match the variable_shape if the variable shape is a valid slice if the input tensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_utils.py", + "ast_data": "FunctionDef name:remove_padding_from_sc arg:value_in_checkpoint arg:variable_shape arguments arg arg Assign Call Assign Call Compare Call If Return return:yes Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "values", + "source_code": "@property\ndef values(self):\n return self._values", + "docstring": "The non-zero values in the represented dense tensor. Returns: A 1-D Tensor of any data type.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py", + "ast_data": "FunctionDef name:values arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "log", + "source_code": "def log(self) -> Tensor:\n omega = self.r.log()\n theta = batched_dot_product(omega, omega).sqrt()\n t = self.t.data\n omega_hat = So3.hat(omega)\n omega_hat_sq = omega_hat @ omega_hat\n V_inv = eye(3, device=omega.device, dtype=omega.dtype) - 0.5 * omega_hat + ((1 - theta * (theta / 2).cos() / (2 * (theta / 2).sin())) / theta.pow(2))[..., None, None] * omega_hat_sq\n t = where(theta[..., None] != 0.0, (t[..., None, :] * V_inv).sum(-1), t)\n return concatenate((t, omega), -1)", + "docstring": "Convert elements of lie group to elements of lie algebra. Example: >>> from kornia.geometry.quaternion import Quaternion >>> q = Quaternion.identity() >>> Se3(q, torch.zeros(3)).log() tensor([0., 0., 0., 0., 0., 0.], grad_fn=)", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py", + "ast_data": "FunctionDef name:log arg:self arguments arg Assign Call Assign Call Call Assign Assign Call Assign Assign Call Call Call Call Assign Call Compare Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "is_distributing_by_cloning", + "source_code": "def is_distributing_by_cloning(model):\n if backend.is_tpu_strategy(model._distribution_strategy) and context.executing_eagerly:\n return False\n elif ops.executing_eagerly_outside_functions():\n return bool(model._compile_distribution)\n return True", + "docstring": "Decide whether this model is going to be distributed via cloning. We are going to distribute the model by cloning in graph mode. Args: model: Keras model to distribute. 
Returns: True if the is going to be distributed using cloning and False otherwise.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py", + "ast_data": "FunctionDef name:is_distributing_by_cloning arg:model arguments arg If BoolOp Call Return return:yes If Call Return return:yes Call Return return:yes" + }, + { + "library": "seaborn", + "name": "facet_data", + "source_code": "def facet_data(self):\n data = self.data\n if self.row_names:\n row_masks = [data[self._row_var] == n for n in self.row_names]\n else:\n row_masks = [np.repeat(True, len(self.data))]\n if self.col_names:\n col_masks = [data[self._col_var] == n for n in self.col_names]\n else:\n col_masks = [np.repeat(True, len(self.data))]\n if self.hue_names:\n hue_masks = [data[self._hue_var] == n for n in self.hue_names]\n else:\n hue_masks = [np.repeat(True, len(self.data))]\n for (i, row), (j, col), (k, hue) in product(enumerate(row_masks), enumerate(col_masks), enumerate(hue_masks)):\n data_ijk = data[row & col & hue & self._not_na]\n yield ((i, j, k), data_ijk)", + "docstring": "Generator for name indices and data subsets for each facet. Yields ------ (i, j, k), data_ijk : tuple of ints, DataFrame The ints provide an index into the {row, col, hue}_names attribute, and the dataframe contains a subset of the full data corresponding to each facet. The generator yields subsets that correspond with the self.axes.flat iterator, or self.axes[i, j] when is None.", + "type": "method", + "file_path": "seaborn\\seaborn\\axisgrid.py", + "ast_data": "FunctionDef name:facet_data arg:self arguments arg Assign If Assign Compare Assign Call Call If Assign Compare Assign Call Call If Assign Compare Assign Call Call For Call Call Call Call Assign" + }, + { + "library": "matplotlib", + "name": "view_limits", + "source_code": "def view_limits(self, vmin, vmax):\n b = self._base\n vmin, vmax = self.nonsingular(vmin, vmax)\n if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':\n vmin = _decade_less_equal(vmin, b)\n vmax = _decade_greater_equal(vmax, b)\n return (vmin, vmax)", + "docstring": "Try to choose the view limits intelligently.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:view_limits arg:self arg:vmin arg:vmax arguments arg arg arg Assign Assign Call If Compare Assign Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_select_and_clip_prob", + "source_code": "def _select_and_clip_prob(cdfprob, sfprob, cdf=True):\n p = np.where(cdf, cdfprob, sfprob)\n return _clip_prob(p)", + "docstring": "Selects either the CDF or SF, and then clips to range 0<=p<=1.", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_ksstats.py", + "ast_data": "FunctionDef name:_select_and_clip_prob arg:cdfprob arg:sfprob arg:cdf arguments arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "get_related_field", + "source_code": "def get_related_field(self):\n opts = self.through._meta\n if self.through_fields:\n field = opts.get_field(self.through_fields[0])\n else:\n for field in opts.fields:\n rel = getattr(field, 'remote_field', None)\n if rel and rel.model == self.model:\n break\n return field.foreign_related_fields[0]", + "docstring": "Return the field in the 'to' object to which this relationship is tied. 
Provided for symmetry with ManyToOneRel.", + "type": "method", + "file_path": "django\\django\\db\\models\\fields\\reverse_related.py", + "ast_data": "FunctionDef name:get_related_field arg:self arguments arg Assign If Assign Call For Assign Call If BoolOp Compare Return return:yes" + }, + { + "library": "cryptography", + "name": "public_bytes", + "source_code": "@abc.abstractmethod\ndef public_bytes(self, encoding: _serialization.Encoding, format: _serialization.PublicFormat) -> bytes:\n pass", + "docstring": "The serialized bytes of the public key.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py", + "ast_data": "FunctionDef name:public_bytes arg:self arg:encoding arg:format arguments arg arg arg" + }, + { + "library": "django", + "name": "save_new", + "source_code": "def save_new(self, form, commit=True):\n return form.save(commit=commit)", + "docstring": "Save and return a new model instance for the given form.", + "type": "method", + "file_path": "django\\django\\forms\\models.py", + "ast_data": "FunctionDef name:save_new arg:self arg:form arg:commit arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "maybe_get_next_module", + "source_code": "def maybe_get_next_module(node: Node, modules: dict[str, nn.Module], target_module_type: Optional[type[nn.Module]]=None, target_functional_type: Any=None) -> Optional[Node]:\n for user in node.users.keys():\n if user.op == 'call_module' and target_module_type is not None and isinstance(modules[str(user.target)], target_module_type):\n return user\n elif user.op == 'call_function' and target_functional_type is not None and (user.target == target_functional_type):\n return user\n return None", + "docstring": "Gets the next module that matches what is needed in is_target_module_type if it exists Args: node: The node whose users we want to look at target_module_type: Module type that we want to check target_functional_type: Functional type that we want to check", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py", + "ast_data": "FunctionDef name:maybe_get_next_module arg:node arg:modules arg:target_module_type arg:target_functional_type arguments arg arg arg arg For Call If BoolOp Compare Compare Call Call Return return:yes If BoolOp Compare Compare Compare Return return:yes Return return:no" + }, + { + "library": "matplotlib", + "name": "set_rotation_mode", + "source_code": "def set_rotation_mode(self, m):\n if m is None:\n m = 'default'\n else:\n _api.check_in_list(('anchor', 'default', 'xtick', 'ytick'), rotation_mode=m)\n self._rotation_mode = m\n self.stale = True", + "docstring": "Set text rotation mode. Parameters ---------- m : {None, 'default', 'anchor', 'xtick', 'ytick'} If ``.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\text.py", + "ast_data": "FunctionDef name:set_rotation_mode arg:self arg:m arguments arg arg If Compare Assign Call Assign Assign" + }, + { + "library": "pytorch", + "name": "CPUOffloadPolicy", + "source_code": "@dataclass\nclass CPUOffloadPolicy(OffloadPolicy):\n pin_memory: bool = True", + "docstring": "This offload policy offloads parameters, gradients, and optimizer states to CPU. Sharded parameters are copied host-to-device before all-gather. 
The all-gathered parameters are freed according to ``)", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_api.py", + "ast_data": "ClassDef name:CPUOffloadPolicy" + }, + { + "library": "matplotlib", + "name": "set_points", + "source_code": "def set_points(self, points):\n if np.any(self._points != points):\n self._points = points\n self.invalidate()", + "docstring": "Set the points of the bounding box directly from an array of the form ``. No error checking is performed, as this method is mainly for internal use.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:set_points arg:self arg:points arguments arg arg If Call Compare Assign Call" + }, + { + "library": "django", + "name": "UpdateView", + "source_code": "class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):\n template_name_suffix = '_form'", + "docstring": "View for updating an object, with a response rendered by a template.", + "type": "class", + "file_path": "django\\django\\views\\generic\\edit.py", + "ast_data": "ClassDef name:UpdateView Assign" + }, + { + "library": "pandas", + "name": "view", + "source_code": "def view(self, cls=None) -> Self:\n result = self.copy()\n result._id = self._id\n return result", + "docstring": "this is defined as a copy with the same identity", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexes\\multi.py", + "ast_data": "FunctionDef name:view arg:self arg:cls arguments arg arg Assign Call Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "op_is_int8_dynamically_quantized", + "source_code": "def op_is_int8_dynamically_quantized(qconfig) -> bool:\n activation_dtype, weight_dtype, activation_is_dynamic = get_qconfig_dtypes(qconfig)\n return activation_dtype in [torch.quint8, torch.uint8] and weight_dtype in [torch.qint8, torch.int8] and activation_is_dynamic", + "docstring": "Given a qconfig, returns True if this op is using int8 dynamic quantization", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\utils.py", + "ast_data": "FunctionDef name:op_is_int8_dynamically_quantized arg:qconfig arguments arg Assign Call Return return:yes BoolOp Compare Compare" + }, + { + "library": "tensorflow", + "name": "non_trainable_weights", + "source_code": "@property\ndef non_trainable_weights(self):\n return self.non_trainable_variables", + "docstring": "List of non-trainable weights/variables created by the Template.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py", + "ast_data": "FunctionDef name:non_trainable_weights arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_resolve_grad_captures", + "source_code": "def _resolve_grad_captures(body_graph, body_grad_graph, while_op):\n new_capture_inputs = []\n for t in body_grad_graph.external_captures:\n if t.graph == body_graph:\n for i, output in enumerate(t.graph.outputs):\n if output is t:\n t = while_op.outputs[i]\n break\n assert t.graph == body_graph.outer_graph\n new_capture_inputs.append(t)\n return new_capture_inputs", + "docstring": "Returns the tensors to pass as captured inputs to . may have external references to: 1. Its outer graph containing the input gradients. These are left as-is. 2. Accumulators captured from the forward-pass graph. These should have been added as outputs after the gradient graph was built. We replace these with the corresponding output of , i.e. a tensor in . 
In the case of nested control flow or functions, the gradient logic handling will make sure the tensor from is also correctly captured. Args: body_graph: FuncGraph. The forward-pass body function. body_grad_graph: FuncGraph. The body gradients function. while_op: The forward-pass While Operation calling . Returns: A list of input tensors to be passed as the captured inputs to .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py", + "ast_data": "FunctionDef name:_resolve_grad_captures arg:body_graph arg:body_grad_graph arg:while_op arguments arg arg arg Assign For If Compare For Call If Compare Assign Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_fix_chunks", + "source_code": "def _fix_chunks(self) -> None:\n if not self._fix_chunk_order:\n return\n chunk_indices = {id(chunk): i for i, chunk in enumerate(self._chunks)}\n to_fix = [self._chunked_message]\n while to_fix:\n for field in to_fix.pop().chunked_fields:\n if field.message.chunked_fields:\n to_fix.append(field.message)\n if not field.message.HasField('chunk_index'):\n continue\n chunk_addr = self._add_chunk_order[field.message.chunk_index]\n assert chunk_addr in chunk_indices, f'Found unexpected chunk {chunk_addr}'\n new_chunk_index = chunk_indices[chunk_addr]\n field.message.chunk_index = new_chunk_index\n self._add_chunk_order = [id(chunk) for chunk in self._chunks]\n self._fix_chunk_order = False", + "docstring": "Fixes chunk indices in the ChunkedMessage.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py", + "ast_data": "FunctionDef name:_fix_chunks arg:self arguments arg If Return return:no Assign Call Call Assign While For Call If Call If Call Assign Compare Assign Assign Assign Call Assign" + }, + { + "library": "scikit-learn", + "name": "is_jax_namespace", + "source_code": "def is_jax_namespace(xp: Namespace) -> bool:\n return xp.__name__ in {'jax.numpy', 'jax.experimental.array_api'}", + "docstring": "Returns True if is a JAX namespace. This includes `` which existed in older versions of JAX. 
See Also -------- array_namespace is_numpy_namespace is_cupy_namespace is_torch_namespace is_ndonnx_namespace is_dask_namespace is_pydata_sparse_namespace is_array_api_strict_namespace", + "type": "function", + "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py", + "ast_data": "FunctionDef name:is_jax_namespace arg:xp arguments arg Return return:yes Compare" + }, + { + "library": "tensorflow", + "name": "internal_grad_fn", + "source_code": "@ops.RegisterGradient(name)\ndef internal_grad_fn(unused_op, *result_grads):\n return tape_grad_fn(*result_grads)", + "docstring": "Custom grad fn wrapper.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\custom_gradient.py", + "ast_data": "FunctionDef name:internal_grad_fn arg:unused_op arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_parse_kernel_line_of_code", + "source_code": "def _parse_kernel_line_of_code(proper_kernel_fn_code: str) -> int:\n return len(proper_kernel_fn_code.splitlines())", + "docstring": "Return the line of code for the kernel excluding the decorators.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\metrics.py", + "ast_data": "FunctionDef name:_parse_kernel_line_of_code arg:proper_kernel_fn_code arguments arg Return return:yes Call Call" + }, + { + "library": "pandas", + "name": "_translate_body", + "source_code": "def _translate_body(self, idx_lengths: dict, max_rows: int, max_cols: int):\n rlabels = self.data.index.tolist()\n if not isinstance(self.data.index, MultiIndex):\n rlabels = [[x] for x in rlabels]\n body: list = []\n visible_row_count: int = 0\n for r, row_tup in [z for z in enumerate(self.data.itertuples()) if z[0] not in self.hidden_rows]:\n visible_row_count += 1\n if self._check_trim(visible_row_count, max_rows, body, 'row'):\n break\n body_row = self._generate_body_row((r, row_tup, rlabels), max_cols, idx_lengths)\n body.append(body_row)\n return body", + "docstring": "Build each within table as a list Use the following structure: +--------------------------------------------+---------------------------+ | index_header_0 ... index_header_n | data_by_column ... | +--------------------------------------------+---------------------------+ Also add elements to the cellstyle_map for more efficient grouped elements in block Parameters ---------- sparsify_index : bool Whether index_headers section will add rowspan attributes (>1) to elements. 
Returns ------- body : list The associated HTML elements needed for template rendering.", + "type": "method", + "file_path": "pandas\\pandas\\io\\formats\\style_render.py", + "ast_data": "FunctionDef name:_translate_body arg:self arg:idx_lengths arg:max_rows arg:max_cols arguments arg arg arg arg Assign Call If Call Assign For Call Call Compare If Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_as_indexed_slices_list", + "source_code": "def _as_indexed_slices_list(inputs, optimize=True):\n if not isinstance(inputs, (list, tuple)):\n raise TypeError(f'Expected a list or tuple, not {type(inputs)}.')\n outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]\n with_int32_index = [o.indices for o in outputs if o.indices.dtype == dtypes.int32]\n if not with_int32_index or len(with_int32_index) == len(outputs):\n return outputs\n casted_outputs = []\n for o in outputs:\n if o.indices.dtype == dtypes.int32:\n casted_outputs.append(indexed_slices.IndexedSlices(o.values, cast(o.indices, dtypes.int64), o.dense_shape))\n else:\n casted_outputs.append(o)\n return casted_outputs", + "docstring": "Convert all elements of 'inputs' to IndexedSlices. Additionally, homogenize the types of all the indices to either int32 or int64. Args: inputs: List containing either Tensor or IndexedSlices objects. optimize: if true, attempt to optimize the conversion of each input. Returns: A list of IndexedSlices objects. Raises: TypeError: If 'inputs' is not a list or a tuple.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", + "ast_data": "FunctionDef name:_as_indexed_slices_list arg:inputs arg:optimize arguments arg arg If Call Raise Call Call Assign Call Assign Compare If BoolOp Compare Call Call Return return:yes Assign For If Compare Call Call Call Call Return return:yes" + }, + { + "library": "scrapy", + "name": "set_environ", + "source_code": "@contextmanager\ndef set_environ(**kwargs: str) -> Iterator[None]:\n original_env = {k: os.environ.get(k) for k in kwargs}\n os.environ.update(kwargs)\n try:\n yield\n finally:\n for k, v in original_env.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v", + "docstring": "Temporarily set environment variables inside the context manager and fully restore previous environment afterwards", + "type": "function", + "file_path": "scrapy\\scrapy\\utils\\misc.py", + "ast_data": "FunctionDef name:set_environ arguments arg Assign Call Call Try For Call If Compare Assign" + }, + { + "library": "matplotlib", + "name": "intervalx", + "source_code": "@property\ndef intervalx(self):\n return self.get_points()[:, 0]", + "docstring": "The pair of *x* coordinates that define the bounding box. This is not guaranteed to be sorted from left to right.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:intervalx arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_add_sparse_feature", + "source_code": "def _add_sparse_feature(self, key, feature):\n if not feature.index_key:\n raise ValueError(f'Missing index_key for SparseFeature {feature}.')\n if not feature.value_key:\n raise ValueError(f'Missing value_key for SparseFeature {feature}.')\n if not feature.dtype:\n raise ValueError(f'Missing type for feature {key}. 
Received feature={feature}.')\n index_keys = feature.index_key\n if isinstance(index_keys, str):\n index_keys = [index_keys]\n elif len(index_keys) > 1:\n tf_logging.warning('SparseFeature is a complicated feature config and should only be used after careful consideration of VarLenFeature.')\n for index_key in sorted(index_keys):\n self._add_sparse_key(index_key, dtypes.int64)\n self._add_sparse_key(feature.value_key, feature.dtype)", + "docstring": "Adds a SparseFeature.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py", + "ast_data": "FunctionDef name:_add_sparse_feature arg:self arg:key arg:feature arguments arg arg arg If Raise Call If Raise Call If Raise Call Assign If Call Assign If Compare Call Call For Call Call Call" + }, + { + "library": "scipy", + "name": "GaussLegendreQuadrature", + "source_code": "class GaussLegendreQuadrature(FixedRule):\n\n def __init__(self, npoints, xp=None):\n if npoints < 2:\n raise ValueError('At least 2 nodes required for Gauss-Legendre cubature')\n self.npoints = npoints\n if xp is None:\n xp = np_compat\n self.xp = array_namespace(xp.empty(0))\n\n @cached_property\n def nodes_and_weights(self):\n nodes, weights = roots_legendre(self.npoints)\n return (self.xp.asarray(nodes, dtype=self.xp.float64), self.xp.asarray(weights, dtype=self.xp.float64))", + "docstring": "Gauss-Legendre quadrature. Parameters ---------- npoints : int Number of nodes for the higher-order rule. xp : array_namespace, optional The namespace for the node and weight arrays. Default is None, where NumPy is used. Examples -------- Evaluate a 1D integral. Note in this example that `` returns an array, so the estimates will also be arrays. >>> import numpy as np >>> from scipy.integrate import cubature >>> from scipy.integrate._rules import GaussLegendreQuadrature >>> def f(x): ... 
return np.cos(x) >>> rule = GaussLegendreQuadrature(21) # Use 21-point GaussLegendre >>> a, b = np.array([0]), np.array([1]) >>> rule.estimate(f, a, b) # True value sin(1), approximately 0.84147 array([0.84147098]) >>> rule.estimate_error(f, a, b) array([1.11022302e-16])", + "type": "class", + "file_path": "scipy\\scipy\\integrate\\_rules\\_gauss_legendre.py", + "ast_data": "ClassDef name:GaussLegendreQuadrature FunctionDef name:__init__ arg:self arg:npoints arg:xp arguments arg arg arg If Compare Raise Call Assign If Compare Assign Assign Call Call FunctionDef name:nodes_and_weights arg:self arguments arg Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "parse_args", + "source_code": "def parse_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser(description='Extracts ResultStore links from a build log.\\nThese can be then printed out, and/or output into a JUnit-based XML file inside a specified directory.')\n parser.add_argument('build_log', help='Path to a build log.')\n parser.add_argument('--xml-out-path', required=False, help='Path to which to output the JUnit-based XML with ResultStore links.')\n parser.add_argument('--print', action='store_true', dest='print', default=False, help='Whether to print out a short summary with the found ResultStore links (if any).')\n parser.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=False, help='Prints out lines helpful for debugging.')\n parsed_args = parser.parse_args()\n if not parsed_args.print and (not parsed_args.xml_out_path):\n raise TypeError('`--print` or `--xml-out-path` must be specified')\n return parsed_args", + "docstring": "Parses the commandline args.", + "type": "function", + "file_path": "tensorflow\\ci\\official\\utilities\\extract_resultstore_links.py", + "ast_data": "FunctionDef name:parse_args arguments Assign Call Call Call Call Call Assign Call If BoolOp Raise Call Return return:yes" + }, + { + "library": "pytorch", + "name": "ChoiceCaller", + "source_code": "class ChoiceCaller:\n\n def __init__(self, name: str, input_nodes: list[Buffer], layout: Layout, description: str) -> None:\n super().__init__()\n self.name = name\n self.layout = layout\n self.input_nodes = input_nodes\n self.description = description\n\n def benchmark(self, *args, out) -> float:\n algo = self.to_callable()\n return benchmarker.benchmark(algo, args, {'out': out})\n\n def call_name(self) -> str:\n raise NotImplementedError\n\n def to_callable(self):\n raise NotImplementedError\n\n def hash_key(self) -> str:\n raise NotImplementedError\n\n def output_node(self) -> TensorBox:\n raise NotImplementedError\n\n def info_dict(self) -> dict[str, Union[PrimitiveInfoType, list[PrimitiveInfoType]]]:\n return {}\n\n def autoheuristic_id(self) -> str:\n return 'unsupported_choice'", + "docstring": "Represents a possible choice used in autotune_process.py. During autotuning, self.benchmark() is first called to get benchmark result, and if this choice is selected, self.output_node() is called to get the output_node. 
Children classes: TritonTemplateCaller, CUDATemplateCaller.", + "type": "class", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "ClassDef name:ChoiceCaller FunctionDef name:__init__ arg:self arg:name arg:input_nodes arg:layout arg:description arguments arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:benchmark arg:self arguments arg arg arg Assign Call Return return:yes Call FunctionDef name:call_name arg:self arguments arg Raise FunctionDef name:to_callable arg:self arguments arg Raise FunctionDef name:hash_key arg:self arguments arg Raise FunctionDef name:output_node arg:self arguments arg Raise FunctionDef name:info_dict arg:self arguments arg Return return:no FunctionDef name:autoheuristic_id arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "_empty", + "source_code": "@classmethod\ndef _empty(cls, shape: Shape, dtype: CategoricalDtype) -> Self:\n arr = cls._from_sequence([], dtype=dtype)\n backing = np.zeros(shape, dtype=arr._ndarray.dtype)\n return arr._from_backing_data(backing)", + "docstring": "Analogous to np.empty(shape, dtype=dtype) Parameters ---------- shape : tuple[int] dtype : CategoricalDtype", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", + "ast_data": "FunctionDef name:_empty arg:cls arg:shape arg:dtype arguments arg arg arg Assign Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "select_related", + "source_code": "def select_related(self, *fields):\n self._not_support_combined_queries('select_related')\n if self._fields is not None:\n raise TypeError('Cannot call select_related() after .values() or .values_list()')\n obj = self._chain()\n if fields == (None,):\n obj.query.select_related = False\n elif fields:\n obj.query.add_select_related(fields)\n else:\n obj.query.select_related = True\n return obj", + "docstring": "Return a new QuerySet instance that will select related objects. If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection. If select_related(None) is called, clear the list.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:select_related arg:self arguments arg arg Call If Compare Raise Call Assign Call If Compare Assign If Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_verify_ops", + "source_code": "def _verify_ops(graph_def: graph_pb2.GraphDef, namespace_whitelist):\n if namespace_whitelist is None:\n return\n invalid_ops = []\n invalid_namespaces = set()\n all_operations = []\n all_operations.extend(meta_graph.ops_used_by_graph_def(graph_def))\n for op in all_operations:\n if '>' in op:\n namespace = op.split('>')[0]\n if namespace not in namespace_whitelist:\n invalid_ops.append(op)\n invalid_namespaces.add(namespace)\n if invalid_ops:\n raise ValueError(f\"Attempted to save ops from non-whitelisted namespaces to SavedModel: {invalid_ops}.\\nPlease verify that these ops should be saved, since they must be available when loading the SavedModel. If loading from Python, you must import the library defining these ops. From C++, link the custom ops to the serving binary. Once you've confirmed this, add the following namespaces to the `namespace_whitelist` argument in tf.saved_model.SaveOptions: {invalid_namespaces}.\")", + "docstring": "Verifies that all namespaced ops in the graph are whitelisted. Args: graph_def: the GraphDef to validate. 
namespace_whitelist: a list of namespaces to allow. If , all will be allowed. If an op does not have a namespace, it will be allowed. Raises: ValueError: If the graph contains ops that violate the whitelist.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py", + "ast_data": "FunctionDef name:_verify_ops arg:graph_def arg:namespace_whitelist arguments arg arg If Compare Return return:no Assign Assign Call Assign Call Call For If Compare Assign Call If Compare Call Call If Raise Call" + }, + { + "library": "tensorflow", + "name": "stop_requested", + "source_code": "@property\ndef stop_requested(self):\n return self._stop_requested", + "docstring": "Returns whether a stop is requested or not. If true, stops iterations. Returns: A", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py", + "ast_data": "FunctionDef name:stop_requested arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "codegen_node", + "source_code": "def codegen_node(self, node: Union[FusedSchedulerNode, SchedulerNode]) -> None:\n raise NotImplementedError", + "docstring": "Generate a kernel given a list of pre-fused nodes.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\scheduler.py", + "ast_data": "FunctionDef name:codegen_node arg:self arg:node arguments arg arg Raise" + }, + { + "library": "tensorflow", + "name": "_ragged_tensor_sparse_categorical_crossentropy", + "source_code": "@dispatch.dispatch_for_types(sparse_categorical_crossentropy, ragged_tensor.RaggedTensor)\ndef _ragged_tensor_sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):\n fn = functools.partial(sparse_categorical_crossentropy, from_logits=from_logits, axis=axis)\n return _ragged_tensor_apply_loss(fn, y_true, y_pred, y_pred_extra_dim=True)", + "docstring": "Implements support for handling RaggedTensors. Expected y_pred shape: (batch, sequence_len, n_classes) with sequence_len being variable per batch. Return shape: (batch, sequence_len). When used by SparseCategoricalCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the loss over the number of elements independent of the batch. E.g. if the RaggedTensor has 2 batches with [2, 1] values respectively, the resulting loss is the sum of the individual loss values divided by 3.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", + "ast_data": "FunctionDef name:_ragged_tensor_sparse_categorical_crossentropy arg:y_true arg:y_pred arg:from_logits arg:axis arguments arg arg arg arg Assign Call Return return:yes Call Call" + }, + { + "library": "scipy", + "name": "standard_deviation", + "source_code": "@abstractmethod\ndef standard_deviation(self, *, method):\n raise NotImplementedError()", + "docstring": "Standard deviation (square root of the second central moment) Parameters ---------- method : {None, 'formula', 'transform', 'normalize', 'quadrature', 'cache'} Method used to calculate the central second moment. Not all methods are available for all distributions. See for details. See Also -------- variance mean moment References ---------- .. [1] Standard deviation, *Wikipedia*, Examples -------- Instantiate a distribution with the desired parameters: >>> from scipy import stats >>> X = stats.Normal(mu=1., sigma=2.) 
Evaluate the standard deviation: >>> X.standard_deviation() 2.0 >>> X.standard_deviation() == X.moment(order=2, kind='central')**0.5 == X.sigma True", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_probability_distribution.py", + "ast_data": "FunctionDef name:standard_deviation arg:self arguments arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "parse_ranges_highlight", + "source_code": "def parse_ranges_highlight(ranges_string):\n ranges = None\n\n def ranges_filter(x):\n r = np.zeros(x.shape, dtype=bool)\n for range_start, range_end in ranges:\n r = np.logical_or(r, np.logical_and(x >= range_start, x <= range_end))\n return r\n if ranges_string:\n ranges = command_parser.parse_ranges(ranges_string)\n return tensor_format.HighlightOptions(ranges_filter, description=ranges_string)\n else:\n return None", + "docstring": "Process ranges highlight string. Args: ranges_string: (str) A string representing a numerical range of a list of numerical ranges. See the help info of the -r flag of the print_tensor command for more details. Returns: An instance of tensor_format.HighlightOptions, if range_string is a valid representation of a range or a list of ranges.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\cli_shared.py", + "ast_data": "FunctionDef name:parse_ranges_highlight arg:ranges_string arguments arg Assign FunctionDef name:ranges_filter arg:x arguments arg Assign Call For Assign Call Call Compare Compare Return return:yes If Assign Call Return return:yes Call Return return:no" + }, + { + "library": "pytorch", + "name": "del_tensors", + "source_code": "def del_tensors(self, names: Iterable[str]) -> None:\n for name in names:\n self.del_tensor(name)", + "docstring": "Delete the attributes specified by the given paths. 
For example, to delete the attributes mod.layer1.conv1.weight and mod.layer1.conv1.bias, use accessor.del_tensors([\"layer1.conv1.weight\", \"layer1.conv1.bias\"])", + "type": "method", + "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py", + "ast_data": "FunctionDef name:del_tensors arg:self arg:names arguments arg arg For Call" + }, + { + "library": "tensorflow", + "name": "_dynamic_ragged_shape_init", + "source_code": "def _dynamic_ragged_shape_init(fields, shape, nrows, row_partitions):\n assert isinstance(fields, dict), fields\n assert isinstance(shape, tensor_shape.TensorShape), shape\n assert nrows is None or isinstance(nrows, tensor.Tensor), nrows\n assert isinstance(row_partitions, tuple), row_partitions\n rank = shape.rank\n if rank is None:\n raise TypeError(\"StructuredTensor's shape must have known rank.\")\n dtype = _find_shape_dtype(fields, nrows, row_partitions)\n if rank == 0:\n return dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape(array_ops.zeros((0,), dtype=dtype))\n if rank == 1:\n alt_value = shape[0]\n if isinstance(alt_value, tensor_shape.Dimension):\n alt_value = alt_value.value\n if alt_value is not None:\n nrows = alt_value\n return dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape([nrows], dtype=dtype)\n return dynamic_ragged_shape.DynamicRaggedShape.from_row_partitions(row_partitions, dtype=dtype)", + "docstring": "Produce a DynamicRaggedShape for StructuredTensor.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor_dynamic.py", + "ast_data": "FunctionDef name:_dynamic_ragged_shape_init arg:fields arg:shape arg:nrows arg:row_partitions arguments arg arg arg arg Call Call BoolOp Compare Call Call Assign If Compare Raise Call Assign Call If Compare Return return:yes Call Call If Compare Assign If Call Assign If Compare Assign Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "batch_size", + "source_code": "@abc.abstractmethod\ndef batch_size(self):\n raise NotImplementedError", + "docstring": "Return the batch size of the dataset created. For certain type of the data input, the batch size is known, and even required, like numpy array. Where as for dataset, the batch is unknown unless we take a peek. Returns: int, the batch size of the dataset, or None if it is unknown.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", + "ast_data": "FunctionDef name:batch_size arg:self arguments arg Raise" + }, + { + "library": "pytorch", + "name": "InternalError", + "source_code": "class InternalError(Exception):\n\n def __init__(self, message: str) -> None:\n super().__init__(message)", + "docstring": "Raised when an internal invariance is violated in EXIR stack. Should hint users to report a bug to dev and expose the original error message.", + "type": "class", + "file_path": "pytorch\\torch\\_export\\error.py", + "ast_data": "ClassDef name:InternalError FunctionDef name:__init__ arg:self arg:message arguments arg arg Call Call" + }, + { + "library": "pandas", + "name": "DuplicateLabelError", + "source_code": "class DuplicateLabelError(ValueError):\n pass", + "docstring": "Error raised when an operation would introduce duplicate labels. This error is typically encountered when performing operations on objects with and the operation would result in duplicate labels in the index. Duplicate labels can lead to ambiguities in indexing and reduce data integrity. 
See Also -------- Series.set_flags : Return a new `` object to new index with optional filling logic. Examples -------- >>> s = pd.Series([0, 1, 2], index=[\"a\", \"b\", \"c\"]).set_flags( ... allows_duplicate_labels=False ... ) >>> s.reindex([\"a\", \"a\", \"b\"]) Traceback (most recent call last): ... DuplicateLabelError: Index has duplicates. positions label a [0, 1]", + "type": "class", + "file_path": "pandas\\pandas\\errors\\__init__.py", + "ast_data": "ClassDef name:DuplicateLabelError" + }, + { + "library": "authlib", + "name": "register_algorithm", + "source_code": "@classmethod\ndef register_algorithm(cls, algorithm):\n if not algorithm or algorithm.algorithm_type != 'JWE':\n raise ValueError(f'Invalid algorithm for JWE, {algorithm!r}')\n if algorithm.algorithm_location == 'alg':\n cls.ALG_REGISTRY[algorithm.name] = algorithm\n elif algorithm.algorithm_location == 'enc':\n cls.ENC_REGISTRY[algorithm.name] = algorithm\n elif algorithm.algorithm_location == 'zip':\n cls.ZIP_REGISTRY[algorithm.name] = algorithm", + "docstring": "Register an algorithm for `` of JWE.", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7516\\jwe.py", + "ast_data": "FunctionDef name:register_algorithm arg:cls arg:algorithm arguments arg arg If BoolOp Compare Raise Call If Compare Assign If Compare Assign If Compare Assign" + }, + { + "library": "cherrypy", + "name": "__init__", + "source_code": "def __init__(self):\n self.accessed_headers = set()\n super(MonitoredHeaderMap, self).__init__()", + "docstring": "Initialize a monitored HTTP header mapping.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\cptools.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Call Call Call" + }, + { + "library": "scikit-learn", + "name": "decision_function", + "source_code": "def decision_function(self, X):\n if self.methods_to_check == 'all' or 'decision_function' in self.methods_to_check:\n X, y = self._check_X_y(X)\n rng = check_random_state(self.random_state)\n if len(self.classes_) == 2:\n return rng.randn(_num_samples(X))\n else:\n return rng.randn(_num_samples(X), len(self.classes_))", + "docstring": "Confidence score. Parameters ---------- X : array-like of shape (n_samples, n_features) The input data. 
Returns ------- decision : ndarray of shape (n_samples,) if n_classes == 2 else (n_samples, n_classes) Confidence score.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\utils\\_mocking.py", + "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg If BoolOp Compare Compare Assign Call Assign Call If Compare Call Return return:yes Call Call Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "_gotitem", + "source_code": "def _gotitem(self, key, ndim: int, subset=None):\n raise AbstractMethodError(self)", + "docstring": "sub-classes to define return a sliced object Parameters ---------- key : str / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on", + "type": "method", + "file_path": "pandas\\pandas\\core\\base.py", + "ast_data": "FunctionDef name:_gotitem arg:self arg:key arg:ndim arg:subset arguments arg arg arg arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "_get_output_config", + "source_code": "def _get_output_config(method, estimator=None):\n est_sklearn_output_config = getattr(estimator, '_sklearn_output_config', {})\n if method in est_sklearn_output_config:\n dense_config = est_sklearn_output_config[method]\n else:\n dense_config = get_config()[f'{method}_output']\n supported_outputs = ADAPTERS_MANAGER.supported_outputs\n if dense_config not in supported_outputs:\n raise ValueError(f'output config must be in {sorted(supported_outputs)}, got {dense_config}')\n return {'dense': dense_config}", + "docstring": "Get output config based on estimator and global configuration. Parameters ---------- method : {\"transform\"} Estimator's method for which the output container is looked up. estimator : estimator instance or None Estimator to get the output configuration from. If , check global configuration is used. Returns ------- config : dict Dictionary with keys: - \"dense\": specifies the dense container for . 
This can be or .", + "type": "function", + "file_path": "scikit-learn\\sklearn\\utils\\_set_output.py", + "ast_data": "FunctionDef name:_get_output_config arg:method arg:estimator arguments arg arg Assign Call If Compare Assign Assign Call Assign If Compare Raise Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "TorchFunctionMode", + "source_code": "class TorchFunctionMode:\n inner: 'TorchFunctionMode'\n\n def __init__(self) -> None:\n pass\n\n def __torch_function__(self, func, types, args=(), kwargs=None):\n raise NotImplementedError\n\n def __enter__(self):\n _push_mode(self)\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n _pop_mode()\n\n @classmethod\n def push(cls, *args, **kwargs):\n warnings.warn('`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`')\n instance = cls(*args, **kwargs)\n return instance", + "docstring": "A `TorchFunctionMode` to make PyTorch API self-referential (beware of infinite loops, in this case!)", + "type": "class", + "file_path": "pytorch\\torch\\overrides.py", + "ast_data": "ClassDef name:TorchFunctionMode FunctionDef name:__init__ arg:self arguments arg FunctionDef name:__torch_function__ arg:self arg:func arg:types arg:args arg:kwargs arguments arg arg arg arg arg Raise FunctionDef name:__enter__ arg:self arguments arg Call Return return:yes FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg Call FunctionDef name:push arg:cls arguments arg arg arg Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "pack_sequence_as", + "source_code": "def pack_sequence_as(structure, flat_sequence):\n flat_sequence = list(flat_sequence)\n flattened_structure = nest.flatten(structure, expand_composites=True)\n if len(flattened_structure) != len(flat_sequence):\n raise ValueError('Mismatch in element count')\n for i in range(len(flat_sequence)):\n if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):\n flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(old_ta=flattened_structure[i], flow=flat_sequence[i])\n return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)", + "docstring": "Like but also builds TensorArrays from flows. Args: structure: The structure to pack into. May contain Tensors, CompositeTensors, or TensorArrays. flat_sequence: An iterable containing tensors. Returns: A nested structure. 
Raises: AssertionError if and are not compatible.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py", + "ast_data": "FunctionDef name:pack_sequence_as arg:structure arg:flat_sequence arguments arg arg Assign Call Assign Call If Compare Call Call Raise Call For Call Call If Call Assign Call Return return:yes Call" + }, + { + "library": "django", + "name": "full_clean", + "source_code": "def full_clean(self):\n self._errors = ErrorDict(renderer=self.renderer)\n if not self.is_bound:\n return\n self.cleaned_data = {}\n if self.empty_permitted and (not self.has_changed()):\n return\n self._clean_fields()\n self._clean_form()\n self._post_clean()", + "docstring": "Clean all of self.data and populate self._errors and self.cleaned_data.", + "type": "method", + "file_path": "django\\django\\forms\\forms.py", + "ast_data": "FunctionDef name:full_clean arg:self arguments arg Assign Call If Return return:no Assign If BoolOp Call Return return:no Call Call Call" + }, + { + "library": "numpy", + "name": "X11NotFoundError", + "source_code": "class X11NotFoundError(NotFoundError):\n pass", + "docstring": "X11 libraries not found.", + "type": "class", + "file_path": "numpy\\numpy\\distutils\\system_info.py", + "ast_data": "ClassDef name:X11NotFoundError" + }, + { + "library": "scipy", + "name": "get_wheel_names", + "source_code": "def get_wheel_names(version):\n http = http_manager()\n tmpl = re.compile(f'^.*{PREFIX}-{version}-.*\\\\.whl$')\n index_url = f'{STAGING_FILE_URL}'\n index_html = http.request('GET', index_url)\n soup = BeautifulSoup(index_html.data, 'html.parser')\n return soup.findAll(string=tmpl)", + "docstring": "Get wheel names from Anaconda HTML directory. This looks in the Anaconda multibuild-wheels-staging page and parses the HTML to get all the wheel names for a release version. Parameters ---------- version : str The release version. For instance, \"1.5.0\".", + "type": "function", + "file_path": "scipy\\tools\\download-wheels.py", + "ast_data": "FunctionDef name:get_wheel_names arg:version arguments arg Assign Call Assign Call Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "authlib", + "name": "validate_introspection_endpoint_auth_methods_supported", + "source_code": "def validate_introspection_endpoint_auth_methods_supported(self):\n validate_array_value(self, 'introspection_endpoint_auth_methods_supported')", + "docstring": "OPTIONAL. JSON array containing a list of client authentication methods supported by this introspection endpoint. The valid client authentication method values are those registered in the IANA \"OAuth Token Endpoint Authentication Methods\" registry [IANA.OAuth.Parameters] or those registered in the IANA \"OAuth Access Token Types\" registry [IANA.OAuth.Parameters]. (These values are and will remain distinct, due to Section 7.2.) 
If omitted, the set of supported authentication methods MUST be determined by other means.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py", + "ast_data": "FunctionDef name:validate_introspection_endpoint_auth_methods_supported arg:self arguments arg Call" + }, + { + "library": "seaborn", + "name": "_ColorPalette", + "source_code": "class _ColorPalette(list):\n\n def __enter__(self):\n from .rcmod import set_palette\n self._orig_palette = color_palette()\n set_palette(self)\n return self\n\n def __exit__(self, *args):\n from .rcmod import set_palette\n set_palette(self._orig_palette)\n\n def as_hex(self):\n hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n return _ColorPalette(hex)\n\n def _repr_html_(self):\n s = 55\n n = len(self)\n html = f''\n for i, c in enumerate(self.as_hex()):\n html += f''\n html += ''\n return html", + "docstring": "Set the color palette in a with statement, otherwise be a list.", + "type": "class", + "file_path": "seaborn\\seaborn\\palettes.py", + "ast_data": "ClassDef name:_ColorPalette FunctionDef name:__enter__ arg:self arguments arg Assign Call Call Return return:yes FunctionDef name:__exit__ arg:self arguments arg arg Call FunctionDef name:as_hex arg:self arguments arg Assign Call Return return:yes Call FunctionDef name:_repr_html_ arg:self arguments arg Assign Assign Call Assign For Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_post_state_dict_hook", + "source_code": "@staticmethod\ndef _post_state_dict_hook(module: nn.Module, state_dict: dict[str, Any], prefix: str, *args: Any) -> dict[str, Any]:\n _replace_by_prefix(state_dict, f'{prefix}{_CHECKPOINT_PREFIX}', prefix)\n return state_dict", + "docstring": "_post_state_dict_hook() is called after the state_dict() of this FSDP module is executed. For ``, it will strip checkpoint-wrapped module prefix, so that this module can be loaded into non-checkpointed modules. It would still be able to be loaded into checkpoint-wrapped modules as this class, adds the prefix back before loading the state_dict.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\algorithms\\_checkpoint\\checkpoint_wrapper.py", + "ast_data": "FunctionDef name:_post_state_dict_hook arg:module arg:state_dict arg:prefix arguments arg arg arg arg Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "is_stationary", + "source_code": "def is_stationary(self):\n return self.kernel.is_stationary()", + "docstring": "Returns whether the kernel is stationary.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", + "ast_data": "FunctionDef name:is_stationary arg:self arguments arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "shape", + "source_code": "def shape(self):\n return self.data.shape", + "docstring": "Returns the shape tuple of the data variable. 
This is a read-only attribute and can not be modified in the same manner of other numpy arrays.", + "type": "method", + "file_path": "scipy\\scipy\\io\\_netcdf.py", + "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "_write_cache", + "source_code": "def _write_cache(step, event_file_suffix=None, **kwargs):\n file_suffix = _TT_EVENT_FILE_SUFFIX\n if event_file_suffix is not None:\n file_suffix = string_ops.string_join([file_suffix, event_file_suffix], separator='.')\n summary_write_ops = []\n summary_writer = summary.create_file_writer_v2(self._parameters.trace_dir, filename_suffix=file_suffix, max_queue=_TT_SUMMARY_MAX_QUEUE)\n graph.add_to_collection(TENSOR_TRACER_SUMMARY_COLLECTION, summary_writer)\n step_value = step[0]\n dt = step_value.dtype\n if dt.__ne__(dtypes.int64) and dt.__ne__(dtypes.uint64) and dt.__ne__(dtypes.float64):\n step_value = math_ops.cast(step_value, dtypes.int64)\n with summary_writer.as_default():\n summary_metadata = summary_pb2.SummaryMetadata(plugin_data=summary_pb2.SummaryMetadata.PluginData(plugin_name=_TT_TENSORBOARD_PLUGIN_NAME))\n for key, value in kwargs.items():\n if not self._parameters.collect_summary_per_core:\n if key == _TT_SUMMARY_TAG and value.shape.as_list()[0] != 1:\n value = self.aggregate_global_cache(value)\n with ops.control_dependencies([summary_writer.init()]):\n summary_write_ops.append(summary.write(_TT_SUMMARY_TAG + '/' + key + '.' + graph_summary_tag, value, metadata=summary_metadata, step=step_value))\n return control_flow_ops.group(summary_write_ops)", + "docstring": "Writes the given caches as tensor summary. Args: step: Step tensor with dimension [num_cores]. event_file_suffix: Event filename suffix tensor. **kwargs: The dictionary of tensors that needs to be written as summaries. Key and value pairs within kwargs correspond to the tag name, and tensor content that will be written using summary.write. The trace_modes that use this function are: - summary: In summary mode, kwargs includes a single (tag, content) pair which are, _TT_SUMMARY_TAG and a tf.float32 signature_cache variable. The dimension of the signature_cache is: num_cores x num_traced_tensors x num_signatures. - full_tensor_summary: kwargs will include all traced tensors. Tag and content correspond to the name of the tensor, and its actual content. 
Returns: A tf.Operation that needs to be executed for the host call dependencies.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", + "ast_data": "FunctionDef name:_write_cache arg:step arg:event_file_suffix arguments arg arg arg Assign If Compare Assign Call Assign Assign Call Call Assign Assign If BoolOp Call Call Call Assign Call With Call Assign Call Call For Call If If BoolOp Compare Compare Call Assign Call With Call Call Call Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "upper_border_begin", + "source_code": "@lru_cache(maxsize=256)\ndef upper_border_begin(self, n: int) -> tuple[int, int]:\n if not n >= (m2p := (self.m_num - self.m_num_mid)):\n raise ValueError(f'Parameter n must be >= ceil(m_num/2) = {m2p}!')\n w2 = self.win.real ** 2 + self.win.imag ** 2\n q2 = n // self.hop + 1\n q1 = max((n - self.m_num) // self.hop - 1, -1)\n for q_ in range(q2, q1, -1):\n k_ = q_ * self.hop + (self.m_num - self.m_num_mid)\n if k_ <= n or all(w2[n - k_:] == 0):\n return ((q_ + 1) * self.hop - self.m_num_mid, q_ + 1)\n return (0, 0)", + "docstring": "First signal index and first slice index affected by post-padding. Describes the point where the window does begin stick out to the right of the signal domain. A detailed example is given :ref: section of the :ref:. Parameters ---------- n : int Number of samples of input signal (must be ≥ half of the window length). Returns ------- k_ub : int Lowest signal index, where a touching time slice sticks out past the signal end. p_ub : int Lowest index of time slice of which the end sticks out past the signal end. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., - . p_range: Determine and validate slice index range. ShortTimeFFT: Class this method belongs to.", + "type": "method", + "file_path": "scipy\\scipy\\signal\\_short_time_fft.py", + "ast_data": "FunctionDef name:upper_border_begin arg:self arg:n arguments arg arg If Compare Raise Call Assign Assign Assign Call For Call Assign If BoolOp Compare Call Compare Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, group_key_start=1):\n self._group_key = group_key_start\n self._instance_key_table = {}\n self._lock = threading.Lock()\n self._known_groups = {}", + "docstring": "Initializes the object. 
Args: group_key_start: the starting integer of group key.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:group_key_start arguments arg arg Assign Assign Assign Call Assign" + }, + { + "library": "pandas", + "name": "is_bool_indexer", + "source_code": "def is_bool_indexer(key: Any) -> bool:\n if isinstance(key, (ABCSeries, np.ndarray, ABCIndex, ABCExtensionArray)) and (not isinstance(key, ABCMultiIndex)):\n if key.dtype == np.object_:\n key_array = np.asarray(key)\n if not lib.is_bool_array(key_array):\n na_msg = 'Cannot mask with non-boolean array containing NA / NaN values'\n if lib.is_bool_array(key_array, skipna=True):\n raise ValueError(na_msg)\n return False\n return True\n elif is_bool_dtype(key.dtype):\n return True\n elif isinstance(key, list):\n if len(key) > 0:\n if type(key) is not list:\n key = list(key)\n return lib.is_bool_list(key)\n return False", + "docstring": "Check whether is a valid boolean indexer. Parameters ---------- key : Any Only list-likes may be considered boolean indexers. All other types are not considered a boolean indexer. For array-like input, boolean ndarrays or ExtensionArrays with `keykey` is a valid array to index, and convert to an ndarray.", + "type": "function", + "file_path": "pandas\\pandas\\core\\common.py", + "ast_data": "FunctionDef name:is_bool_indexer arg:key arguments arg If BoolOp Call Call If Compare Assign Call If Call Assign If Call Raise Call Return return:yes Return return:yes If Call Return return:yes If Call If Compare Call If Compare Call Assign Call Return return:yes Call Return return:yes" + }, + { + "library": "django", + "name": "cleanup", + "source_code": "def cleanup(self):\n if self.is_templatized:\n if os.path.exists(self.work_path):\n os.unlink(self.work_path)", + "docstring": "Remove a preprocessed copy of a translatable file (if any).", + "type": "method", + "file_path": "django\\django\\core\\management\\commands\\makemessages.py", + "ast_data": "FunctionDef name:cleanup arg:self arguments arg If If Call Call" + }, + { + "library": "scikit-learn", + "name": "_minibatch_step", + "source_code": "def _minibatch_step(self, X, dictionary, random_state, step):\n batch_size = X.shape[0]\n code = _sparse_encode(X, dictionary, algorithm=self._fit_algorithm, alpha=self.alpha, n_jobs=self.n_jobs, positive=self.positive_code, max_iter=self.transform_max_iter, verbose=self.verbose)\n batch_cost = (0.5 * ((X - code @ dictionary) ** 2).sum() + self.alpha * np.sum(np.abs(code))) / batch_size\n self._update_inner_stats(X, code, batch_size, step)\n _update_dict(dictionary, X, code, self._A, self._B, verbose=self.verbose, random_state=random_state, positive=self.positive_dict)\n return batch_cost", + "docstring": "Perform the update on the dictionary for one minibatch.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py", + "ast_data": "FunctionDef name:_minibatch_step arg:self arg:X arg:dictionary arg:random_state arg:step arguments arg arg arg arg arg Assign Assign Call Assign Call Call Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_RightShift", + "source_code": "def _RightShift(x):\n rank = array_ops.rank(x)\n zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)\n pad = array_ops.concat([zeros, array_ops.constant([[1, 0], [0, 0]])], axis=0)\n return array_ops.pad(x[..., :-1, :], pad)", + "docstring": "Shifts next-to-last dimension to the right, 
adding zero on the left.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py", + "ast_data": "FunctionDef name:_RightShift arg:x arguments arg Assign Call Assign Call Assign Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_sum_rightmost", + "source_code": "def _sum_rightmost(value, dim):\n if dim == 0:\n return value\n required_shape = value.shape[:-dim] + (-1,)\n return value.reshape(required_shape).sum(-1)", + "docstring": "Sum out ``. dim (int): The number of rightmost dims to sum out.", + "type": "function", + "file_path": "pytorch\\torch\\distributions\\utils.py", + "ast_data": "FunctionDef name:_sum_rightmost arg:value arg:dim arguments arg arg If Compare Return return:yes Assign Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "home", + "source_code": "def home(self, *args):\n self._nav_stack.home()\n self.set_history_buttons()\n self._update_view()", + "docstring": "Restore the original view. For convenience of being directly connected as a GUI callback, which often get passed additional parameters, this method accepts arbitrary parameters, but does not use them.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:home arg:self arguments arg arg Call Call Call" + }, + { + "library": "kornia", + "name": "_singular_range_check", + "source_code": "def _singular_range_check(ranged_factor: Tensor, name: str, bounds: Optional[Tuple[float, float]]=None, skip_none: bool=False, mode: str='2d') -> None:\n if mode == '2d':\n dim_size = 2\n elif mode == '3d':\n dim_size = 3\n else:\n raise ValueError(f\"'mode' shall be either 2d or 3d. Got {mode}\")\n if skip_none and ranged_factor is None:\n return\n if bounds is None:\n bounds = (float('-inf'), float('inf'))\n if ranged_factor.dim() == 1 and len(ranged_factor) == dim_size:\n for f in ranged_factor:\n if not bounds[0] <= f <= bounds[1]:\n raise ValueError(f'{name} out of bounds. Expected inside {bounds}, got {ranged_factor}.')\n else:\n raise TypeError(f'{name} should be a float number or a tuple with length {dim_size} whose values between {bounds}.Got {ranged_factor}')", + "docstring": "Check if bounds[0] <= ranged_factor[0] <= bounds[1] and bounds[0] <= ranged_factor[1] <= bounds[1].", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\utils\\param_validation.py", + "ast_data": "FunctionDef name:_singular_range_check arg:ranged_factor arg:name arg:bounds arg:skip_none arg:mode arguments arg arg arg arg arg If Compare Assign If Compare Assign Raise Call If BoolOp Compare Return return:no If Compare Assign Call Call If BoolOp Compare Call Compare Call For If Compare Raise Call Raise Call" + }, + { + "library": "pandas", + "name": "_with_freq", + "source_code": "def _with_freq(self, freq) -> Self:\n if freq is None:\n pass\n elif len(self) == 0 and isinstance(freq, BaseOffset):\n if self.dtype.kind == 'm' and (not isinstance(freq, Tick)):\n raise TypeError('TimedeltaArray/Index freq must be a Tick')\n else:\n assert freq == 'infer'\n freq = to_offset(self.inferred_freq)\n arr = self.view()\n arr._freq = freq\n return arr", + "docstring": "Helper to get a view on the same data, with a new freq. 
Parameters ---------- freq : DateOffset, None, or \"infer\" Returns ------- Same type as self", + "type": "method", + "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", + "ast_data": "FunctionDef name:_with_freq arg:self arg:freq arguments arg arg If Compare If BoolOp Compare Call Call If BoolOp Compare Call Raise Call Compare Assign Call Assign Call Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, name, pivot):\n super(XLACompileContext, self).__init__()\n self._name = name\n self._name_as_bytes = compat.as_bytes(name)\n self._unsupported_ops = []\n self._pivot = pivot", + "docstring": "Builds a new XLACompileContext. Args: name: a unique name for the context, used to populate the attribute. pivot: a pivot node. Nodes in the XLACompileContext that do not have any inputs will have a control dependency on the pivot node. This ensures that nodes are correctly included in any enclosing control flow contexts.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\xla.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:pivot arguments arg arg arg Call Call Assign Assign Call Assign Assign" + }, + { + "library": "matplotlib", + "name": "_get_running_interactive_framework", + "source_code": "def _get_running_interactive_framework():\n QtWidgets = sys.modules.get('PyQt6.QtWidgets') or sys.modules.get('PySide6.QtWidgets') or sys.modules.get('PyQt5.QtWidgets') or sys.modules.get('PySide2.QtWidgets')\n if QtWidgets and QtWidgets.QApplication.instance():\n return 'qt'\n Gtk = sys.modules.get('gi.repository.Gtk')\n if Gtk:\n if Gtk.MAJOR_VERSION == 4:\n from gi.repository import GLib\n if GLib.main_depth():\n return 'gtk4'\n if Gtk.MAJOR_VERSION == 3 and Gtk.main_level():\n return 'gtk3'\n wx = sys.modules.get('wx')\n if wx and wx.GetApp():\n return 'wx'\n tkinter = sys.modules.get('tkinter')\n if tkinter:\n codes = {tkinter.mainloop.__code__, tkinter.Misc.mainloop.__code__}\n for frame in sys._current_frames().values():\n while frame:\n if frame.f_code in codes:\n return 'tk'\n frame = frame.f_back\n del frame\n macosx = sys.modules.get('matplotlib.backends._macosx')\n if macosx and macosx.event_loop_is_running():\n return 'macosx'\n if not _c_internal_utils.display_is_valid():\n return 'headless'\n return None", + "docstring": "Return the interactive framework whose event loop is currently running, if any, or \"headless\" if no event loop can be started, or None. Returns ------- Optional[str] One of the following values: \"qt\", \"gtk3\", \"gtk4\", \"wx\", \"tk\", \"macosx\", \"headless\", ``.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "FunctionDef name:_get_running_interactive_framework arguments Assign BoolOp Call Call Call Call If BoolOp Call Return return:yes Assign Call If If Compare If Call Return return:yes If BoolOp Compare Call Return return:yes Assign Call If BoolOp Call Return return:yes Assign Call If Assign For Call Call While If Compare Return return:yes Assign Assign Call If BoolOp Call Return return:yes If Call Return return:yes Return return:no" + }, + { + "library": "tensorflow", + "name": "set", + "source_code": "def set(self, value):\n pywrap_tfe.TFE_MonitoringStringGaugeCellSet(self._cell, value)", + "docstring": "Atomically set the value. 
Args: value: string value.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py", + "ast_data": "FunctionDef name:set arg:self arg:value arguments arg arg Call" + }, + { + "library": "pytorch", + "name": "ShadowLogger", + "source_code": "class ShadowLogger(Logger):\n\n def __init__(self):\n super().__init__()\n self.stats['float'] = []\n self.stats['quantized'] = []\n\n def forward(self, x, y):\n if len(x) > 1:\n x = x[0]\n if len(y) > 1:\n y = y[0]\n self.stats['quantized'].append(x.detach())\n self.stats['float'].append(y.detach())", + "docstring": "Class used in Shadow module to record the outputs of the original and shadow modules.", + "type": "class", + "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite.py", + "ast_data": "ClassDef name:ShadowLogger FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign FunctionDef name:forward arg:self arg:x arg:y arguments arg arg arg If Compare Call Assign If Compare Call Assign Call Call Call Call" + }, + { + "library": "matplotlib", + "name": "set_array", + "source_code": "def set_array(self, A):\n self.set_data(A)", + "docstring": "Retained for backwards compatibility - use set_data instead. Parameters ---------- A : array-like", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\image.py", + "ast_data": "FunctionDef name:set_array arg:self arg:A arguments arg arg Call" + }, + { + "library": "pytorch", + "name": "manual_seed_all", + "source_code": "def manual_seed_all(seed: int) -> None:\n seed = int(seed)\n\n def cb():\n for i in range(device_count()):\n default_generator = torch.cuda.default_generators[i]\n default_generator.manual_seed(seed)\n _lazy_call(cb, seed_all=True)", + "docstring": "Set the seed for generating random numbers on all GPUs. It's safe to call this function if CUDA is not available; in that case, it is silently ignored. Args: seed (int): The desired seed.", + "type": "function", + "file_path": "pytorch\\torch\\cuda\\random.py", + "ast_data": "FunctionDef name:manual_seed_all arg:seed arguments arg Assign Call FunctionDef name:cb arguments For Call Call Assign Call Call" + }, + { + "library": "pytorch", + "name": "get_lowered_module_name", + "source_code": "def get_lowered_module_name(root: torch.nn.Module, lowered_module: LOWERED_BACKEND_MODULE_TYPE) -> str:\n qualname = None\n i = 0\n while True:\n qualname = f'lowered_module_{i}'\n if not hasattr(root, qualname):\n break\n i += 1\n assert qualname is not None\n root.add_module(qualname, lowered_module)\n return qualname", + "docstring": "Adds the given lowered_module into the given root module and returns the name of the module added.", + "type": "function", + "file_path": "pytorch\\torch\\_higher_order_ops\\executorch_call_delegate.py", + "ast_data": "FunctionDef name:get_lowered_module_name arg:root arg:lowered_module arguments arg arg Assign Assign While Assign If Call Compare Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "get_fillstyle", + "source_code": "def get_fillstyle(self):\n return self._marker.get_fillstyle()", + "docstring": "Return the marker fill style. 
See also .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\lines.py", + "ast_data": "FunctionDef name:get_fillstyle arg:self arguments arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "reduce_mean", + "source_code": "def reduce_mean(self, x):\n return self.reduce(lambda y: math_ops.reduce_mean(y, axis=0), x)", + "docstring": "Performs a mean reduction on across pfor iterations. Note that this currently may not work inside a control flow construct. Args: x: an unvectorized Tensor. Returns: A Tensor that has same rank as . The value is the mean of the values of across the pfor iterations.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:reduce_mean arg:self arg:x arguments arg arg Return return:yes Call arguments arg Call" + }, + { + "library": "pytorch", + "name": "create_node", + "source_code": "@compatibility(is_backward_compatible=True)\ndef create_node(self, op: str, target: 'Target', args: Optional[tuple['Argument', ...]]=None, kwargs: Optional[dict[str, 'Argument']]=None, name: Optional[str]=None, type_expr: Optional[Any]=None) -> Node:\n if not args:\n args = ()\n else:\n assert isinstance(args, tuple), 'args must be a tuple'\n if not kwargs:\n kwargs = immutable_dict()\n else:\n assert isinstance(kwargs, dict), 'kwargs must be a dict'\n candidate = name if name is not None else self._target_to_str(target)\n name = self._graph_namespace.create_name(candidate, None)\n n = Node(self, name, op, target, args, kwargs, type_expr)\n if self.owning_module is not None and getattr(self.owning_module, '_create_node_hooks', None) is not None:\n for f in self.owning_module._create_node_hooks:\n f(n)\n self._graph_namespace.associate_name_with_obj(name, n)\n self._insert(n)\n self._find_nodes_lookup_table.insert(n)\n self._len += 1\n return n", + "docstring": "Create a `Graph.inserting_beforeGraph.inserting_after`. This will influence the name of the value assigned to in the Python generated code. type_expr (Optional[Any]): an optional type annotation representing the Python type the output of this node will have. 
Returns: The newly-created and inserted node.", + "type": "method", + "file_path": "pytorch\\torch\\fx\\graph.py", + "ast_data": "FunctionDef name:create_node arg:self arg:op arg:target arg:args arg:kwargs arg:name arg:type_expr arguments arg arg arg arg arg arg arg If Assign Call If Assign Call Call Assign Compare Call Assign Call Assign Call If BoolOp Compare Compare Call For Call Call Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "dump_next_step", + "source_code": "def dump_next_step(self):\n if not self._enabled:\n return\n self._dump_next_step = True\n self._slow_path_steps.add(self._step)", + "docstring": "Enable tracing and dump profiles at next step.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\profile_context.py", + "ast_data": "FunctionDef name:dump_next_step arg:self arguments arg If Return return:no Assign Call" + }, + { + "library": "tensorflow", + "name": "_sum_flops", + "source_code": "@ops.RegisterStatistics('Sum', 'flops')\ndef _sum_flops(graph, node):\n return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)", + "docstring": "Compute flops for Sum operation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py", + "ast_data": "FunctionDef name:_sum_flops arg:graph arg:node arguments arg arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "register_mock_hook", + "source_code": "def register_mock_hook(self, hook: ActionHook) -> RemovableHandle:\n handle = RemovableHandle(self._mock_hooks)\n self._mock_hooks[handle.id] = hook\n return handle", + "docstring": "Registers a mock hook on the exporter. The hook will be called each time a module matches against a :meth: pattern. It should have the following signature:: hook(exporter: PackageExporter, module_name: str) -> None Hooks will be called in order of registration. 
Returns: :class:: A handle that can be used to remove the added hook by calling ``.", + "type": "method", + "file_path": "pytorch\\torch\\package\\package_exporter.py", + "ast_data": "FunctionDef name:register_mock_hook arg:self arg:hook arguments arg arg Assign Call Assign Return return:yes" + }, + { + "library": "django", + "name": "datetime_cast_date_sql", + "source_code": "def datetime_cast_date_sql(self, sql, params, tzname):\n raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_date_sql() method.')", + "docstring": "Return the SQL to cast a datetime value to date value.", + "type": "method", + "file_path": "django\\django\\db\\backends\\base\\operations.py", + "ast_data": "FunctionDef name:datetime_cast_date_sql arg:self arg:sql arg:params arg:tzname arguments arg arg arg arg Raise Call" + }, + { + "library": "tensorflow", + "name": "_extract_outputs_from_fn", + "source_code": "def _extract_outputs_from_fn(layer, call_and_return_conditional_losses):\n if isinstance(layer, keras_load.RevivedLayer):\n return layer.keras_api.__call__\n\n def call(inputs, *args, **kwargs):\n return call_and_return_conditional_losses(inputs, *args, **kwargs)[0]\n return _create_call_fn_decorator(layer, call)", + "docstring": "Returns a function that returns only call function outputs.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py", + "ast_data": "FunctionDef name:_extract_outputs_from_fn arg:layer arg:call_and_return_conditional_losses arguments arg arg If Call Return return:yes FunctionDef name:call arg:inputs arguments arg arg arg Return return:yes Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "get_overridable_functions", + "source_code": "@_disable_user_warnings\ndef get_overridable_functions() -> dict[Any, list[Callable]]:\n return _get_overridable_functions()[0]", + "docstring": "List functions that are overridable via __torch_function__ Returns ------- Dict[Any, List[Callable]] A dictionary that maps namespaces that contain overridable functions to functions in that namespace that can be overridden.", + "type": "function", + "file_path": "pytorch\\torch\\overrides.py", + "ast_data": "FunctionDef name:get_overridable_functions arguments Return return:yes Call" + }, + { + "library": "pytorch", + "name": "sets", + "source_code": "@cached_property\ndef sets(self) -> list[TokenInfo]:\n return [t for i, t in enumerate(self.tokens) if self.is_set(i)]", + "docstring": "A list of tokens which use the built-in set symbol", + "type": "method", + "file_path": "pytorch\\tools\\linter\\adapters\\set_linter.py", + "ast_data": "FunctionDef name:sets arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "Softmax", + "source_code": "class Softmax(Layer):\n\n def __init__(self, axis=-1, **kwargs):\n super(Softmax, self).__init__(**kwargs)\n self.supports_masking = True\n self.axis = axis\n\n def call(self, inputs, mask=None):\n if mask is not None:\n adder = (1.0 - math_ops.cast(mask, inputs.dtype)) * _large_compatible_negative(inputs.dtype)\n inputs += adder\n if isinstance(self.axis, (tuple, list)):\n if len(self.axis) > 1:\n return math_ops.exp(inputs - math_ops.reduce_logsumexp(inputs, axis=self.axis, keepdims=True))\n else:\n return backend.softmax(inputs, axis=self.axis[0])\n return backend.softmax(inputs, axis=self.axis)\n\n def get_config(self):\n config = {'axis': self.axis}\n base_config = super(Softmax, self).get_config()\n return 
dict(list(base_config.items()) + list(config.items()))\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n return input_shape", + "docstring": "Softmax activation function. Example without mask: >>> inp = np.asarray([1., 2., 1.]) >>> layer = tf.keras.layers.Softmax() >>> layer(inp).numpy() array([0.21194157, 0.5761169 , 0.21194157], dtype=float32) >>> mask = np.asarray([True, False, True], dtype=bool) >>> layer(inp, mask).numpy() array([0.5, 0. , 0.5], dtype=float32) Input shape: Arbitrary. Use the keyword argument (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Args: axis: Integer, or list of Integers, axis along which the softmax normalization is applied. Call arguments: inputs: The inputs, or logits to the softmax layer. mask: A boolean mask of the same shape as . Defaults to . The mask specifies 1 to keep and 0 to mask. Returns: softmaxed output with the same shape as .", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\advanced_activations.py", + "ast_data": "ClassDef name:Softmax FunctionDef name:__init__ arg:self arg:axis arguments arg arg arg Call Call Assign Assign FunctionDef name:call arg:self arg:inputs arg:mask arguments arg arg arg If Compare Assign Call Call If Call If Compare Call Return return:yes Call Call Return return:yes Call Return return:yes Call FunctionDef name:get_config arg:self arguments arg Assign Assign Call Call Return return:yes Call Call Call Call Call FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_props", + "source_code": "def set_props(self, **props):\n artist = self._selection_artist\n props = cbook.normalize_kwargs(props, artist)\n artist.set(**props)\n if self.useblit:\n self.update()", + "docstring": "Set the properties of the selector artist. See the *props* argument in the selector docstring to know which properties are supported.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", + "ast_data": "FunctionDef name:set_props arg:self arguments arg arg Assign Assign Call Call If Call" + }, + { + "library": "pytorch", + "name": "opset_version", + "source_code": "@property\ndef opset_version(self) -> int:\n return self._opset_version", + "docstring": "The ONNX opset version the exporter should target.", + "type": "method", + "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_registration.py", + "ast_data": "FunctionDef name:opset_version arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "is_cancelled", + "source_code": "@property\ndef is_cancelled(self):\n return pywrap_tfe.TFE_CancellationManagerIsCancelled(self._impl)", + "docstring": "Returns if has been called.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\eager\\cancellation.py", + "ast_data": "FunctionDef name:is_cancelled arg:self arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_get_assigned_rank", + "source_code": "def _get_assigned_rank(self, bucket_index: int) -> int:\n assert not self._overlap_info.shard_buckets, 'The bucket assignment requires global bucket information and will be computed later; there should be no need to use this method'\n return bucket_index % self.world_size", + "docstring": "Return the single rank assigned to a :class: gradient bucket. 
Arguments: bucket_index (int): index of the :class: bucket for which to get the assigned rank.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py", + "ast_data": "FunctionDef name:_get_assigned_rank arg:self arg:bucket_index arguments arg arg Return return:yes" + }, + { + "library": "pytorch", + "name": "read_data", + "source_code": "@abc.abstractmethod\ndef read_data(self, plan: LoadPlan, planner: LoadPlanner) -> Future[None]:\n pass", + "docstring": "Read all items from `` to get access to the tensors that in should load data into. It's the StorageLayer responsibility to properly schedule any cross device copies required. Args: plan (LoadPlan): The local plan to execute on planner (LoadPlanner): The planner object to use to resolve items. Returns: A future that completes once all reads are finished.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py", + "ast_data": "FunctionDef name:read_data arg:self arg:plan arg:planner arguments arg arg arg" + }, + { + "library": "pytorch", + "name": "parent", + "source_code": "@property\ndef parent(self) -> Optional[CUDAGraphNode]:\n return self._parent() if self._parent is not None else None", + "docstring": "unwraps the weakref to _parent", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", + "ast_data": "FunctionDef name:parent arg:self arguments arg Return return:yes Compare Call" + }, + { + "library": "pytorch", + "name": "add_param_group", + "source_code": "def add_param_group(self, param_group: Mapping[str, Any]) -> None:\n assert isinstance(param_group, dict), 'param group must be a dict'\n params = param_group['params']\n if isinstance(params, torch.Tensor):\n param_group['params'] = [params]\n else:\n param_group['params'] = list(params)\n param_to_key = {param: key for key, param in self.named_parameters.items()}\n for param in param_group['params']:\n if param not in param_to_key:\n raise ValueError('some parameters are not in the module')\n self.ordered_param_keys.append(param_to_key[param])\n self._optimizer.add_param_group(param_group)\n self.param_groups = self._optimizer.param_groups", + "docstring": "Add a param group to the :class: s . Warning: This API is still in development and subject to change.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\optim\\named_optimizer.py", + "ast_data": "FunctionDef name:add_param_group arg:self arg:param_group arguments arg arg Call Assign If Call Assign Assign Call Assign Call For If Compare Raise Call Call Call Assign" + }, + { + "library": "sphinx", + "name": "desc_type", + "source_code": "class desc_type(nodes.Part, nodes.Inline, nodes.FixedTextElement):\n pass", + "docstring": "Node for return types or object type names.", + "type": "class", + "file_path": "sphinx\\sphinx\\addnodes.py", + "ast_data": "ClassDef name:desc_type" + }, + { + "library": "matplotlib", + "name": "set_height", + "source_code": "def set_height(self, height):\n self._height = height\n self.stale = True", + "docstring": "Set the height of the ellipse. 
Parameters ---------- height : float", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:set_height arg:self arg:height arguments arg arg Assign Assign" + }, + { + "library": "matplotlib", + "name": "TriFinder", + "source_code": "class TriFinder:\n\n def __init__(self, triangulation):\n _api.check_isinstance(Triangulation, triangulation=triangulation)\n self._triangulation = triangulation\n\n def __call__(self, x, y):\n raise NotImplementedError", + "docstring": "Abstract base class for classes used to find the triangles of a Triangulation in which (x, y) points lie. Rather than instantiate an object of a class derived from TriFinder, it is usually better to use the function . Derived classes implement __call__(x, y) where x and y are array-like point coordinates of the same shape.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\tri\\_trifinder.py", + "ast_data": "ClassDef name:TriFinder FunctionDef name:__init__ arg:self arg:triangulation arguments arg arg Call Assign FunctionDef name:__call__ arg:self arg:x arg:y arguments arg arg arg Raise" + }, + { + "library": "pytorch", + "name": "_canonical_dim", + "source_code": "def _canonical_dim(dim: DimOrDims, ndim: int) -> tuple[int, ...]:\n dims: list[int] = []\n if dim == ():\n dim = None\n if dim is None:\n return tuple(range(ndim))\n ndim = max(ndim, 1)\n dim_ = (dim,) if isinstance(dim, (int, torch.SymInt)) else dim\n for d in dim_:\n if d in dims:\n raise RuntimeError(f'dim={d} appears multiple times in the list of dims')\n if d >= ndim or d < -ndim:\n raise IndexError(f'Dimension out of range (expected to be in range of [{-ndim}, {ndim - 1}], but got {d})')\n dims.append(d % ndim)\n return tuple(sorted(dims))", + "docstring": "Return dim argument as a tuple of sorted dim values.", + "type": "function", + "file_path": "pytorch\\torch\\masked\\_ops.py", + "ast_data": "FunctionDef name:_canonical_dim arg:dim arg:ndim arguments arg arg If Compare Assign If Compare Return return:yes Call Call Assign Call Assign Call For If Compare Raise Call If BoolOp Compare Compare Raise Call Call Return return:yes Call Call" + }, + { + "library": "numpy", + "name": "log2", + "source_code": "@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef log2(x):\n x = _fix_real_lt_zero(x)\n return nx.log2(x)", + "docstring": "Compute the logarithm base 2 of . Return the \"principal value\" (for a description of this, see ) of :math:. 
For real , this is a real number (`xxoutx >> np.set_printoptions(precision=4) >>> np.emath.log2(8) 3.0 >>> np.emath.log2([-4, -8, 8]) array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_scimath_impl.py", + "ast_data": "FunctionDef name:log2 arg:x arguments arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "pandas", + "name": "is_indexed", + "source_code": "@property\ndef is_indexed(self) -> bool:\n if not hasattr(self.table, 'cols'):\n return False\n return getattr(self.table.cols, self.cname).is_indexed", + "docstring": "return whether I am an indexed column", + "type": "method", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:is_indexed arg:self arguments arg If Call Return return:yes Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_parse_input_saver_proto", + "source_code": "def _parse_input_saver_proto(input_saver, input_binary):\n if not gfile.Exists(input_saver):\n raise IOError(\"Input saver file '\" + input_saver + \"' does not exist!\")\n mode = 'rb' if input_binary else 'r'\n with gfile.GFile(input_saver, mode) as f:\n saver_def = saver_pb2.SaverDef()\n if input_binary:\n saver_def.ParseFromString(f.read())\n else:\n text_format.Merge(f.read(), saver_def)\n return saver_def", + "docstring": "Parses input tensorflow Saver into SaverDef proto.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\freeze_graph.py", + "ast_data": "FunctionDef name:_parse_input_saver_proto arg:input_saver arg:input_binary arguments arg arg If Call Raise Call Assign With Call Assign Call If Call Call Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "check_termination", + "source_code": "def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):\n ftol_satisfied = dF < ftol * F and ratio > 0.25\n xtol_satisfied = dx_norm < xtol * (xtol + x_norm)\n if ftol_satisfied and xtol_satisfied:\n return 4\n elif ftol_satisfied:\n return 2\n elif xtol_satisfied:\n return 3\n else:\n return None", + "docstring": "Check termination condition for nonlinear least squares.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py", + "ast_data": "FunctionDef name:check_termination arg:dF arg:F arg:dx_norm arg:x_norm arg:ratio arg:ftol arg:xtol arguments arg arg arg arg arg arg arg Assign BoolOp Compare Compare Assign Compare If BoolOp Return return:yes If Return return:yes If Return return:yes Return return:no" + }, + { + "library": "tensorflow", + "name": "tensor_list", + "source_code": "def tensor_list(elements, element_dtype=None, element_shape=None, use_tensor_array=False):\n _validate_list_constructor(elements, element_dtype, element_shape)\n if use_tensor_array:\n return data_structures.tf_tensor_array_new(elements, element_dtype, element_shape)\n else:\n return data_structures.tf_tensor_list_new(elements, element_dtype, element_shape)", + "docstring": "Creates an tensor list and populates it with the given elements. This function provides a more uniform access to tensor lists and tensor arrays, and allows optional initialization. Note: this function is a simplified wrapper. If you need greater control, it is recommended to use the underlying implementation directly. 
Args: elements: Iterable[tf.Tensor, ...], the elements to initially fill the list with element_dtype: Optional[tf.DType], data type for the elements in the list; required if the list is empty element_shape: Optional[tf.TensorShape], shape for the elements in the list; required if the list is empty use_tensor_array: bool, whether to use the more compatible but restrictive tf.TensorArray implementation Returns: Union[tf.Tensor, tf.TensorArray], the new list. Raises: ValueError: for invalid arguments", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\lang\\special_functions.py", + "ast_data": "FunctionDef name:tensor_list arg:elements arg:element_dtype arg:element_shape arg:use_tensor_array arguments arg arg arg arg Call If Return return:yes Call Return return:yes Call" + }, + { + "library": "authlib", + "name": "validate_code_challenge_methods_supported", + "source_code": "def validate_code_challenge_methods_supported(self):\n validate_array_value(self, 'code_challenge_methods_supported')", + "docstring": "OPTIONAL. JSON array containing a list of Proof Key for Code Exchange (PKCE) [RFC7636] code challenge methods supported by this authorization server. Code challenge method values are used in the \"code_challenge_method\" parameter defined in Section 4.3 of [RFC7636]. The valid code challenge method values are those registered in the IANA \"PKCE Code Challenge Methods\" registry [IANA.OAuth.Parameters]. If omitted, the authorization server does not support PKCE.", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py", + "ast_data": "FunctionDef name:validate_code_challenge_methods_supported arg:self arguments arg Call" + }, + { + "library": "tensorflow", + "name": "filter_line", + "source_code": "def filter_line(self, line):\n filtered = []\n warn_msg = []\n splited = line.split('\\n')\n if not line and len(splited) < 1:\n warn_msg = '[Warning] Empty line detected while filtering lines.'\n logging.warning(warn_msg)\n self.warning_msg.append(warn_msg)\n if splited[0] == '[':\n filtered = splited[1:]\n elif '[' in splited[0]:\n splited = splited[0].replace('[', '')\n filtered = splited\n else:\n warn_msg = '[Warning] Format error. `[` could be missing in '\n warn_msg += 'the config (.ini) file. (line = %s)' % str(line)\n logging.warning(warn_msg)\n self.warning_msg.append(warn_msg)\n if filtered[-1] == ']':\n filtered = filtered[:-1]\n elif ']' in filtered[-1]:\n filtered[-1] = filtered[-1].replace(']', '')\n else:\n warn_msg = '[Warning] Format error. `]` could be missing in '\n warn_msg += 'the config (.ini) file. (line = %s)' % str(line)\n logging.warning(warn_msg)\n self.warning_msg.append(warn_msg)\n return filtered", + "docstring": "Removes or from the input line. Args: line: String that is a compatibility specification line from the config file. 
Returns: String that is a compatibility specification line without and .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\compat_checker\\compat_checker.py", + "ast_data": "FunctionDef name:filter_line arg:self arg:line arguments arg arg Assign Assign Assign Call If BoolOp Compare Call Assign Call Call If Compare Assign If Compare Assign Call Assign Assign Call Call Call If Compare Assign If Compare Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "django", + "name": "render_to_kml", + "source_code": "def render_to_kml(*args, **kwargs):\n return HttpResponse(loader.render_to_string(*args, **kwargs), content_type='application/vnd.google-earth.kml+xml')", + "docstring": "Render the response as KML (using the correct MIME type).", + "type": "function", + "file_path": "django\\django\\contrib\\gis\\shortcuts.py", + "ast_data": "FunctionDef name:render_to_kml arguments arg arg Return return:yes Call Call" + }, + { + "library": "django", + "name": "log_change", + "source_code": "def log_change(self, request, obj, message):\n from django.contrib.admin.models import CHANGE, LogEntry\n return LogEntry.objects.log_actions(user_id=request.user.pk, queryset=[obj], action_flag=CHANGE, change_message=message, single_object=True)", + "docstring": "Log that an object has been successfully changed. The default implementation creates an admin LogEntry object.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:log_change arg:self arg:request arg:obj arg:message arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "nanmean", + "source_code": "@bottleneck_switch()\n@_datetimelike_compat\ndef nanmean(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None) -> float:\n dtype = values.dtype\n values, mask = _get_values(values, skipna, fill_value=0, mask=mask)\n dtype_sum = _get_dtype_max(dtype)\n dtype_count = np.dtype(np.float64)\n if dtype.kind in 'mM':\n dtype_sum = np.dtype(np.float64)\n elif dtype.kind in 'iu':\n dtype_sum = np.dtype(np.float64)\n elif dtype.kind == 'f':\n dtype_sum = dtype\n dtype_count = dtype\n count = _get_counts(values.shape, mask, axis, dtype=dtype_count)\n the_sum = values.sum(axis, dtype=dtype_sum)\n the_sum = _ensure_numeric(the_sum)\n if axis is not None and getattr(the_sum, 'ndim', False):\n count = cast(np.ndarray, count)\n with np.errstate(all='ignore'):\n the_mean = the_sum / count\n ct_mask = count == 0\n if ct_mask.any():\n the_mean[ct_mask] = np.nan\n else:\n the_mean = the_sum / count if count > 0 else np.nan\n return the_mean", + "docstring": "Compute the mean of the element along an axis ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- float Unless input is a float array, in which case use the same precision as the input array. 
Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanmean(s.values) np.float64(1.5)", + "type": "function", + "file_path": "pandas\\pandas\\core\\nanops.py", + "ast_data": "FunctionDef name:nanmean arg:values arguments arg arg arg arg Assign Assign Call Assign Call Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Assign Assign Call Assign Call Assign Call If BoolOp Compare Call Assign Call With Call Assign Assign Compare If Call Assign Assign Compare Return return:yes Call" + }, + { + "library": "kornia", + "name": "_get_center_kernel2d", + "source_code": "def _get_center_kernel2d(h: int, w: int, device: Optional[torch.device]=None) -> Tensor:\n if device is None:\n device = torch.device('cpu')\n center_kernel = zeros(2, 2, h, w, device=device)\n if h % 2 != 0:\n h_i1 = h // 2\n h_i2 = h // 2 + 1\n else:\n h_i1 = h // 2 - 1\n h_i2 = h // 2 + 1\n if w % 2 != 0:\n w_i1 = w // 2\n w_i2 = w // 2 + 1\n else:\n w_i1 = w // 2 - 1\n w_i2 = w // 2 + 1\n center_kernel[(0, 1), (0, 1), h_i1:h_i2, w_i1:w_i2] = 1.0 / float((h_i2 - h_i1) * (w_i2 - w_i1))\n return center_kernel", + "docstring": "Generate a kernel to return center coordinates, when applied with F.conv2d to 2d coordinates grid. Args: h: kernel height. w: kernel width. device: device, on which generate. Returns: conv_kernel [2x2xhxw].", + "type": "function", + "file_path": "kornia\\kornia\\geometry\\subpix\\spatial_soft_argmax.py", + "ast_data": "FunctionDef name:_get_center_kernel2d arg:h arg:w arg:device arguments arg arg arg If Compare Assign Call Assign Call If Compare Assign Assign Assign Assign If Compare Assign Assign Assign Assign Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_get_full_signature_list", + "source_code": "def _get_full_signature_list(self):\n return self._interpreter.GetSignatureDefs()", + "docstring": "Gets list of SignatureDefs in the model. Example, Returns: A list of SignatureDef details in a dictionary structure. It is keyed on the SignatureDef method name, and the value holds dictionary of inputs and outputs.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py", + "ast_data": "FunctionDef name:_get_full_signature_list arg:self arguments arg Return return:yes Call" + }, + { + "library": "cherrypy", + "name": "output", + "source_code": "def output(self):\n return list(self.encode_header_items(self.items()))", + "docstring": "Transform self into a list of (name, value) tuples.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", + "ast_data": "FunctionDef name:output arg:self arguments arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "get_tensor_name", + "source_code": "def get_tensor_name(tensor):\n parts = tensor.name.split(':')\n if len(parts) > 2:\n raise ValueError('Tensor name invalid. Expect 0 or 1 colon, got {0}'.format(len(parts) - 1))\n if len(parts) > 1 and parts[1] != '0':\n return tensor.name\n return parts[0]", + "docstring": "Returns name of the input tensor. 
Args: tensor: tf.Tensor Returns: str", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py", + "ast_data": "FunctionDef name:get_tensor_name arg:tensor arguments arg Assign Call If Compare Call Raise Call Call Call If BoolOp Compare Call Compare Return return:yes Return return:yes" + }, + { + "library": "authlib", + "name": "get_op_key", + "source_code": "def get_op_key(self, operation):\n self.check_key_op(operation)\n if not self.raw_key:\n self.load_raw_key()\n return self.raw_key", + "docstring": "Get the raw key for the given key_op. This method will also check if the given key_op is supported by this key. :param operation: key operation value, such as \"sign\", \"encrypt\". :return: raw key", + "type": "method", + "file_path": "authlib\\authlib\\jose\\rfc7518\\oct_key.py", + "ast_data": "FunctionDef name:get_op_key arg:self arg:operation arguments arg arg Call If Call Return return:yes" + }, + { + "library": "pytorch", + "name": "is_valid_permutation", + "source_code": "def is_valid_permutation(rank: int, perm: DimsSequenceType) -> bool:\n return isinstance(perm, Sequence) and sorted(perm) == list(range(rank))", + "docstring": "Validates that perm is a permutation of length rank.", + "type": "function", + "file_path": "pytorch\\torch\\_prims_common\\__init__.py", + "ast_data": "FunctionDef name:is_valid_permutation arg:rank arg:perm arguments arg arg Return return:yes BoolOp Call Compare Call Call Call" + }, + { + "library": "pytorch", + "name": "get_free_symbol_uses", + "source_code": "def get_free_symbol_uses(self, unbacked_only: bool=False) -> OrderedSet[sympy.Symbol]:\n return OrderedSet()", + "docstring": "When unbacked_only=True: Returns the unbacked symbols which are required to be in scope in order to successfully perform codegen for this buffer. For example, a buffer that corresponds to an extern kernel call that takes i0 as an argument would return {i0} here. This is used to generate necessary dependencies that ensure we actually bind i0 in codegen before you try to use it. Note that this is NOT transitive; in particular, if this buffer takes in as input another buffer with dynamic shape (e.g., (i0,)), we will not report it here, because you will already have a dependency on that buffer, which will eventually have a dependency on i0 if necessary. When unbacked_only=False: Similar to but including all free symbols instead of only free unbacked symbols.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "FunctionDef name:get_free_symbol_uses arg:self arg:unbacked_only arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "Ones", + "source_code": "@tf_export(v1=['initializers.ones', 'ones_initializer'])\n@deprecation.deprecated_endpoints('initializers.ones', 'ones_initializer')\nclass Ones(Initializer):\n\n @deprecated_args(None, 'Call initializer instance with the dtype argument instead of passing it to the constructor', 'dtype')\n def __init__(self, dtype=dtypes.float32):\n self.dtype = dtypes.as_dtype(dtype)\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n return array_ops.ones(shape, dtype)\n\n def get_config(self):\n return {'dtype': self.dtype.name}", + "docstring": "Initializer that generates tensors initialized to 1. @compatibility(TF2) This API is compatible with TF2 behavior and , and can be migrated immediately with . 
Before: >>> initializer = tf.compat.v1.keras.initializers.ones() >>> initializer((1, 1)) After: >>> initializer = tf.keras.initializers.ones() >>> initializer((1, 1)) @end_compatibility", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py", + "ast_data": "ClassDef name:Ones FunctionDef name:__init__ arg:self arg:dtype arguments arg arg Assign Call Call FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:partition_info arguments arg arg arg arg If Compare Assign Return return:yes Call FunctionDef name:get_config arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "matplotlib", + "name": "get_zbound", + "source_code": "def get_zbound(self):\n lower, upper = self.get_zlim()\n if lower < upper:\n return (lower, upper)\n else:\n return (upper, lower)", + "docstring": "Return the lower and upper z-axis bounds, in increasing order. See Also -------- set_zbound get_zlim, set_zlim invert_zaxis, zaxis_inverted", + "type": "method", + "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", + "ast_data": "FunctionDef name:get_zbound arg:self arguments arg Assign Call If Compare Return return:yes Return return:yes" + }, + { + "library": "django", + "name": "find_location", + "source_code": "def find_location(self, root, path, prefix=None):\n if prefix:\n prefix = '%s%s' % (prefix, os.sep)\n if not path.startswith(prefix):\n return None\n path = path.removeprefix(prefix)\n path = safe_join(root, path)\n if os.path.exists(path):\n return path", + "docstring": "Find a requested static file in a location and return the found absolute path (or `` if no match).", + "type": "method", + "file_path": "django\\django\\contrib\\staticfiles\\finders.py", + "ast_data": "FunctionDef name:find_location arg:self arg:root arg:path arg:prefix arguments arg arg arg arg If Assign If Call Return return:no Assign Call Assign Call If Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "is_layouts_same", + "source_code": "def is_layouts_same(self, embedding_layouts) -> bool:\n if self._checkpoint_layouts.keys() != embedding_layouts.keys():\n raise ValueError('Layouts in checkpoint and embedding must have the same keys. found {} and {}'.format(self._checkpoint_layouts.keys(), embedding_layouts.keys()))\n for key, layout in self._checkpoint_layouts.items():\n if not compare.ProtoEq(layout, embedding_layouts[key]):\n logging.info('Layouts do not match for %s this will require resharding; %s vs %s', key, layout, embedding_layouts[key])\n return False\n return True", + "docstring": "Returns True if the all the embedding and checkpoint layouts are the same. Args: embedding_layouts: dict of layouts for embedding tables. Raises: ValueError if the embedding layouts and checkpoint layouts do not have the same keys. 
Returns: Bool representing if the embedding layouts match the layouts in checkpoint.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py", + "ast_data": "FunctionDef name:is_layouts_same arg:self arg:embedding_layouts arguments arg arg If Compare Call Call Raise Call Call Call Call For Call If Call Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "_init_from_metadata", + "source_code": "@classmethod\ndef _init_from_metadata(cls, metadata):\n init_args = dict(name=metadata['name'], trainable=metadata['trainable'])\n if metadata.get('dtype') is not None:\n init_args['dtype'] = metadata['dtype']\n if metadata.get('batch_input_shape') is not None:\n init_args['batch_input_shape'] = metadata['batch_input_shape']\n revived_obj = cls(**init_args)\n with utils.no_automatic_dependency_tracking_scope(revived_obj):\n revived_obj._expects_training_arg = metadata['expects_training_arg']\n config = metadata.get('config')\n if generic_utils.validate_config(config):\n revived_obj._config = config\n if metadata.get('input_spec') is not None:\n revived_obj.input_spec = recursively_deserialize_keras_object(metadata['input_spec'], module_objects={'InputSpec': input_spec.InputSpec})\n if metadata.get('activity_regularizer') is not None:\n revived_obj.activity_regularizer = regularizers.deserialize(metadata['activity_regularizer'])\n if metadata.get('_is_feature_layer') is not None:\n revived_obj._is_feature_layer = metadata['_is_feature_layer']\n if metadata.get('stateful') is not None:\n revived_obj.stateful = metadata['stateful']\n return (revived_obj, _revive_setter)", + "docstring": "Create revived layer from metadata stored in the SavedModel proto.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py", + "ast_data": "FunctionDef name:_init_from_metadata arg:cls arg:metadata arguments arg arg Assign Call If Compare Call Assign If Compare Call Assign Assign Call With Call Assign Assign Call If Call Assign If Compare Call Assign Call If Compare Call Assign Call If Compare Call Assign If Compare Call Assign Return return:yes" + }, + { + "library": "cryptography", + "name": "__copy__", + "source_code": "@abc.abstractmethod\ndef __copy__(self) -> DSAPrivateKey:\n pass", + "docstring": "Returns a copy.", + "type": "method", + "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py", + "ast_data": "FunctionDef name:__copy__ arg:self arguments arg" + }, + { + "library": "pytorch", + "name": "_get_module_type_filter", + "source_code": "def _get_module_type_filter(tp: Callable):\n tp_str = tp.__module__ + '.' + tp.__qualname__\n\n def module_type_filter(n: Node) -> bool:\n nn_module_stack = n.meta.get('nn_module_stack', {})\n types = []\n for _, t in nn_module_stack.values():\n if isinstance(t, type):\n t = t.__module__ + '.' 
+ t.__qualname__\n types.append(t)\n return tp_str in types\n return module_type_filter", + "docstring": "Get the module_type_filter function for a given module type, the filter accepts a node and checks if the node comes from a module that has certain module type For example: node: linear_op = call_function # comes from a module with type Block -> Sub -> Linear >> module_type_filter = _get_module_type_filter(Sub) # submodule with type , under the submodule >> print(module_type_filter(node)) True # the node is from the submodule (same for and as well)", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xnnpack_quantizer.py", + "ast_data": "FunctionDef name:_get_module_type_filter arg:tp arguments arg Assign FunctionDef name:module_type_filter arg:n arguments arg Assign Call Assign For Call If Call Assign Call Return return:yes Compare Return return:yes" + }, + { + "library": "pytorch", + "name": "post_reshard", + "source_code": "def post_reshard(self):\n if self._uses_param_mixed_precision and (not self.uses_sharded_strategy) and (not self._force_full_precision):\n self._free_low_precision_sharded_param()", + "docstring": "Run the post-reshard logic. This includes freeing any memory that can now be freed given that the `` 's data points to the full precision sharded flat parameter.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", + "ast_data": "FunctionDef name:post_reshard arg:self arguments arg If BoolOp Call" + }, + { + "library": "tensorflow", + "name": "generate_dequeue_op", + "source_code": "def generate_dequeue_op(self, tpu_device=0):\n self.freeze()\n if self._generated_dequeue_op and (not ops.inside_function()):\n raise ValueError(\"Can't generate two dequeue Ops from the same queue\")\n self._generated_dequeue_op = True\n full_name = '%s/dequeue' % self._name\n sharded_shapes = [policy.get_unpartitioned_shape(policy.get_sharded_shape(shape)) for shape, policy in zip(self._tuple_shapes, self._sharding_policies)]\n if tpu_device is not None:\n with ops.device(tpu_name_util.core(tpu_device)):\n dequeue_op = tpu_ops.infeed_dequeue_tuple(dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)\n else:\n dequeue_op = tpu_ops.infeed_dequeue_tuple(dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)\n if self._number_of_partitions <= 1:\n return dequeue_op\n partitions = [policy.get_unpartitioned_shape([1] * shape.ndims).as_list() for shape, policy in zip(self._tuple_shapes, self._sharding_policies)]\n return tag_sharding_attribute_for_dequeued_tensors(dequeue_op, partitions)", + "docstring": "Generates the device-side Op to dequeue a tuple from the queue. Implicitly freezes the queue configuration if it is not already frozen, which will raise errors if the shapes and types have not been fully specified. Args: tpu_device: The TPU device ordinal where the infeed instruction should be placed. If None, no explicit placement will be performed, and it is up to the user to call this API from within a proper TPU device scope. The XLA code will fail if the TPU dequeue instruction is not bound to any device. Returns: A list of Outputs corresponding to a shard of infeed dequeued into XLA, suitable for use within a replicated block. 
Raises: ValueError: if the types or shapes of the tuple elements have not been set; or if a dequeue op has already been generated.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py", + "ast_data": "FunctionDef name:generate_dequeue_op arg:self arg:tpu_device arguments arg arg Call If BoolOp Call Raise Call Assign Assign Assign Call Call Call If Compare With Call Call Assign Call Assign Call If Compare Return return:yes Assign Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "immutable_dict", + "source_code": "@compatibility(is_backward_compatible=True)\nclass immutable_dict(dict[_KT, _VT]):\n __delitem__ = _no_mutation\n __ior__ = _no_mutation\n __setitem__ = _no_mutation\n clear = _no_mutation\n pop = _no_mutation\n popitem = _no_mutation\n setdefault = _no_mutation\n update = _no_mutation\n\n def __hash__(self) -> int:\n return hash(frozenset(self.items()))\n\n def __reduce__(self) -> tuple[type[Self], tuple[tuple[tuple[_KT, _VT], ...]]]:\n return (type(self), (tuple(self.items()),))", + "docstring": "An immutable version of :class:.", + "type": "class", + "file_path": "pytorch\\torch\\fx\\immutable_collections.py", + "ast_data": "ClassDef name:immutable_dict Assign Assign Assign Assign Assign Assign Assign Assign FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call Call Call FunctionDef name:__reduce__ arg:self arguments arg Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_LoadDataset", + "source_code": "class _LoadDataset(dataset_ops.DatasetSource):\n\n def __init__(self, path: str, element_spec: Any, compression: str, reader_func: Callable[[dataset_ops.Dataset], dataset_ops.Dataset]):\n self._path = path\n self._element_spec = element_spec\n self._compression = compression\n self._reader_func = structured_function.StructuredFunctionWrapper(reader_func, 'load()', input_structure=dataset_ops.DatasetSpec(dataset_ops.DatasetSpec(self._element_spec)))\n variant_tensor = ged_ops.load_dataset(path, reader_func_other_args=self._reader_func.function.captured_inputs, compression=compression, reader_func=self._reader_func.function, **self._flat_structure)\n super().__init__(variant_tensor)\n\n @property\n def element_spec(self) -> Any:\n return self._element_spec", + "docstring": "A dataset that loads previously saved dataset.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\load_op.py", + "ast_data": "ClassDef name:_LoadDataset FunctionDef name:__init__ arg:self arg:path arg:element_spec arg:compression arg:reader_func arguments arg arg arg arg arg Assign Assign Assign Assign Call Call Call Assign Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_ExecutionInfo", + "source_code": "class _ExecutionInfo:\n\n def __init__(self, root_module: nn.Module) -> None:\n self.curr_module: nn.Module = root_module\n self.module_forward_order: list[nn.Module] = [root_module]\n self.module_to_param_usage_infos: dict[nn.Module, list[_ParamUsageInfo]] = {root_module: []}\n self.param_forward_order: list[nn.Parameter] = []\n self.visited_params: set[nn.Parameter] = set()", + "docstring": "This represents the execution order information from the forward pass. Attributes: curr_module (nn.Module): Current module being traced. module_forward_order (List[nn.Module]): The modules in (pre-)forward order, i.e. 
the order in which their `_ParamUsageInfo`.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\fsdp\\_trace_utils.py", + "ast_data": "ClassDef name:_ExecutionInfo FunctionDef name:__init__ arg:self arg:root_module arguments arg arg Call" + }, + { + "library": "tensorflow", + "name": "nnz", + "source_code": "def nnz(self):\n return sm_ops.sparse_matrix_nnz(self._matrix)", + "docstring": "Number of stored values, including explicit zeros.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_ops.py", + "ast_data": "FunctionDef name:nnz arg:self arguments arg Return return:yes Call" + }, + { + "library": "pandas", + "name": "_get_window_indexer", + "source_code": "def _get_window_indexer(self) -> GroupbyIndexer:\n window_indexer = GroupbyIndexer(groupby_indices=self._grouper.indices, window_indexer=ExpandingIndexer)\n return window_indexer", + "docstring": "Return an indexer class that will compute the window start and end bounds Returns ------- GroupbyIndexer", + "type": "method", + "file_path": "pandas\\pandas\\core\\window\\expanding.py", + "ast_data": "FunctionDef name:_get_window_indexer arg:self arguments arg Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "has_resource", + "source_code": "def has_resource(self, feature_column, name):\n del feature_column, name\n raise NotImplementedError('StateManager.has_resource')", + "docstring": "Returns true iff a resource with same name exists. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A object this variable corresponds to. name: Name of the resource.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:has_resource arg:self arg:feature_column arg:name arguments arg arg arg Raise Call" + }, + { + "library": "pytorch", + "name": "_reduce_aot_config", + "source_code": "def _reduce_aot_config(self, aot_config: AOTConfig):\n return (_ident, (aot_config.num_params_buffers, aot_config.keep_inference_input_mutations, aot_config.is_export, aot_config.no_tangents, aot_config.dynamic_shapes, aot_config.aot_autograd_arg_pos_to_source, aot_config.enable_log, aot_config.pre_dispatch))", + "docstring": "Reduce the config to a stable key for caching.", + "type": "method", + "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py", + "ast_data": "FunctionDef name:_reduce_aot_config arg:self arg:aot_config arguments arg arg Return return:yes" + }, + { + "library": "django", + "name": "_add_hints", + "source_code": "def _add_hints(self, **hints):\n self._hints.update(hints)", + "docstring": "Update hinting information for use by routers. 
Add new key/values or overwrite existing key/values.", + "type": "method", + "file_path": "django\\django\\db\\models\\query.py", + "ast_data": "FunctionDef name:_add_hints arg:self arguments arg arg Call" + }, + { + "library": "numpy", + "name": "_add_trailing_padding", + "source_code": "def _add_trailing_padding(value, padding):\n if value.fields is None:\n field_spec = {'names': ['f0'], 'formats': [value], 'offsets': [0], 'itemsize': value.itemsize}\n else:\n fields = value.fields\n names = value.names\n field_spec = {'names': names, 'formats': [fields[name][0] for name in names], 'offsets': [fields[name][1] for name in names], 'itemsize': value.itemsize}\n field_spec['itemsize'] += padding\n return dtype(field_spec)", + "docstring": "Inject the specified number of padding bytes at the end of a dtype", + "type": "function", + "file_path": "numpy\\numpy\\_core\\_internal.py", + "ast_data": "FunctionDef name:_add_trailing_padding arg:value arg:padding arguments arg arg If Compare Assign Assign Assign Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "dtype", + "source_code": "@property\ndef dtype(self):\n raise NotImplementedError", + "docstring": "The of this variable.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", + "ast_data": "FunctionDef name:dtype arg:self arguments arg Raise" + }, + { + "library": "pytorch", + "name": "register_state_dict_pre_hook", + "source_code": "def register_state_dict_pre_hook(self, hook: Callable[['Optimizer'], None], prepend: bool=False) -> RemovableHandle:\n handle = hooks.RemovableHandle(self._optimizer_state_dict_pre_hooks)\n self._optimizer_state_dict_pre_hooks[handle.id] = hook\n if prepend:\n self._optimizer_state_dict_pre_hooks.move_to_end(handle.id, last=False)\n return handle", + "docstring": "Register a state dict pre-hook which will be called before :meth: is called. 
It should have the following signature:: hook(optimizer) -> None The `torch.utils.hooks.RemoveableHandle`", + "type": "method", + "file_path": "pytorch\\torch\\optim\\optimizer.py", + "ast_data": "FunctionDef name:register_state_dict_pre_hook arg:self arg:hook arg:prepend arguments arg arg arg Assign Call Assign If Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "Round4", + "source_code": "@_register_style(_style_list)\nclass Round4:\n\n def __init__(self, pad=0.3, rounding_size=None):\n self.pad = pad\n self.rounding_size = rounding_size\n\n def __call__(self, x0, y0, width, height, mutation_size):\n pad = mutation_size * self.pad\n if self.rounding_size:\n dr = mutation_size * self.rounding_size\n else:\n dr = pad / 2.0\n width = width + 2 * pad - 2 * dr\n height = height + 2 * pad - 2 * dr\n x0, y0 = (x0 - pad + dr, y0 - pad + dr)\n x1, y1 = (x0 + width, y0 + height)\n cp = [(x0, y0), (x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0), (x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1), (x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1), (x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0), (x0, y0)]\n com = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CURVE4, Path.CLOSEPOLY]\n return Path(cp, com)", + "docstring": "A box with rounded edges.", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "ClassDef name:Round4 FunctionDef name:__init__ arg:self arg:pad arg:rounding_size arguments arg arg arg Assign Assign FunctionDef name:__call__ arg:self arg:x0 arg:y0 arg:width arg:height arg:mutation_size arguments arg arg arg arg arg arg Assign If Assign Assign Assign Assign Assign Assign Assign Assign Return return:yes Call Call" + }, + { + "library": "authlib", + "name": "validate_c_hash", + "source_code": "def validate_c_hash(self):\n code = self.params.get('code')\n c_hash = self.get('c_hash')\n if code:\n if not c_hash:\n raise MissingClaimError('c_hash')\n if not _verify_hash(c_hash, code, self.header['alg']):\n raise InvalidClaimError('c_hash')", + "docstring": "Code hash value. Its value is the base64url encoding of the left-most half of the hash of the octets of the ASCII representation of the code value, where the hash algorithm used is the hash algorithm used in the alg Header Parameter of the ID Token's JOSE Header. For instance, if the alg is HS512, hash the code value with SHA-512, then take the left-most 256 bits and base64url encode them. The c_hash value is a case sensitive string. If the ID Token is issued from the Authorization Endpoint with a code, which is the case for the response_type values code id_token and code id_token token, this is REQUIRED; otherwise, its inclusion is OPTIONAL.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\core\\claims.py", + "ast_data": "FunctionDef name:validate_c_hash arg:self arguments arg Assign Call Assign Call If If Raise Call If Call Raise Call" + }, + { + "library": "pytorch", + "name": "get_workspace_size", + "source_code": "def get_workspace_size(self) -> int:\n return 0", + "docstring": "Gets extra global memory size needed by this buffer. Some algorithms (e.g. 
group gemm) may require extra global memory in the generated code.", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\ir.py", + "ast_data": "FunctionDef name:get_workspace_size arg:self arguments arg Return return:yes" + }, + { + "library": "tensorflow", + "name": "TypeRegistry", + "source_code": "class TypeRegistry(object):\n\n def __init__(self):\n self._registry = {}\n\n def register(self, obj, value):\n if obj in self._registry:\n raise KeyError(f'{type(obj)} has already been registered.')\n self._registry[obj] = value\n\n def lookup(self, obj):\n for registered in self._registry:\n if isinstance(obj, registered):\n return self._registry[registered]\n raise LookupError(f'{type(obj)} has not been registered.')", + "docstring": "Provides a type registry for the python registry pattern. Contains mappings between types and type specific objects, to implement the registry pattern. Some example uses of this would be to register different functions depending on the type of object.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\utils\\type_registry.py", + "ast_data": "ClassDef name:TypeRegistry FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:register arg:self arg:obj arg:value arguments arg arg arg If Compare Raise Call Call Assign FunctionDef name:lookup arg:self arg:obj arguments arg arg For If Call Return return:yes Raise Call Call" + }, + { + "library": "pandas", + "name": "__init__", + "source_code": "@doc(storage_options=_shared_docs['storage_options'])\ndef __init__(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None:\n import_optional_dependency('python_calamine')\n super().__init__(filepath_or_buffer, storage_options=storage_options, engine_kwargs=engine_kwargs)", + "docstring": "Reader using calamine engine (xlsx/xls/xlsb/ods). Parameters ---------- filepath_or_buffer : str, path to be parsed or an open readable stream. {storage_options} engine_kwargs : dict, optional Arbitrary keyword arguments passed to excel engine.", + "type": "method", + "file_path": "pandas\\pandas\\io\\excel\\_calamine.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:filepath_or_buffer arg:storage_options arg:engine_kwargs arguments arg arg arg arg Call Call Call Call" + }, + { + "library": "pytorch", + "name": "ConvBnReLU2d", + "source_code": "class ConvBnReLU2d(_FusedModule):\n\n def __init__(self, conv, bn, relu):\n assert type_before_parametrizations(conv) == Conv2d and type_before_parametrizations(bn) == BatchNorm2d and (type_before_parametrizations(relu) == ReLU), f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}{type_before_parametrizations(relu)}'\n super().__init__(conv, bn, relu)", + "docstring": "This is a sequential container which calls the Conv 2d, Batch Norm 2d, and ReLU modules. 
During quantization this will be replaced with the corresponding fused module.", + "type": "class", + "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py", + "ast_data": "ClassDef name:ConvBnReLU2d FunctionDef name:__init__ arg:self arg:conv arg:bn arg:relu arguments arg arg arg arg BoolOp Compare Call Compare Call Compare Call Call Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "_fit_encoding_binary_or_continuous", + "source_code": "def _fit_encoding_binary_or_continuous(self, X_ordinal, y, n_categories, target_mean):\n if self.smooth == 'auto':\n y_variance = np.var(y)\n encodings = _fit_encoding_fast_auto_smooth(X_ordinal, y, n_categories, target_mean, y_variance)\n else:\n encodings = _fit_encoding_fast(X_ordinal, y, n_categories, self.smooth, target_mean)\n return encodings", + "docstring": "Learn target encodings.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_target_encoder.py", + "ast_data": "FunctionDef name:_fit_encoding_binary_or_continuous arg:self arg:X_ordinal arg:y arg:n_categories arg:target_mean arguments arg arg arg arg arg If Compare Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "mark_plot_labels", + "source_code": "def mark_plot_labels(app, document):\n for name, explicit in document.nametypes.items():\n if not explicit:\n continue\n labelid = document.nameids[name]\n if labelid is None:\n continue\n node = document.ids[labelid]\n if node.tagname in ('html_only', 'latex_only'):\n for n in node:\n if n.tagname == 'figure':\n sectname = name\n for c in n:\n if c.tagname == 'caption':\n sectname = c.astext()\n break\n node['ids'].remove(labelid)\n node['names'].remove(name)\n n['ids'].append(labelid)\n n['names'].append(name)\n document.settings.env.labels[name] = (document.settings.env.docname, labelid, sectname)\n break", + "docstring": "To make plots referenceable, we need to move the reference from the \"htmlonly\" (or \"latexonly\") node to the actual figure node itself.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\sphinxext\\plot_directive.py", + "ast_data": "FunctionDef name:mark_plot_labels arg:app arg:document arguments arg arg For Call If Assign If Compare Assign If Compare For If Compare Assign For If Compare Assign Call Call Call Call Call Assign" + }, + { + "library": "tensorflow", + "name": "ShardByTaskPolicy", + "source_code": "@tf_export.tf_export('train.experimental.ShardByTaskPolicy')\nclass ShardByTaskPolicy(sharding_util.ShardingCallback):\n\n @property\n def description(self) -> str:\n return 'Split tensors into shards based on their device spec task.'\n\n def __call__(self, shardable_tensors: Sequence[sharding_util.ShardableTensor]) -> Sequence[sharding_util.Shard]:\n tensors_by_task = {}\n for shardable_tensor in shardable_tensors:\n tensor = shardable_tensor.tensor\n checkpoint_key = shardable_tensor.checkpoint_key\n slice_spec = shardable_tensor.slice_spec\n tensors_by_task.setdefault(checkpoint_key, {})[slice_spec] = tensor\n return [tensors_by_task]", + "docstring": "Policy that splits tensors into shards based on their device spec task.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\sharding\\sharding_policies.py", + "ast_data": "ClassDef name:ShardByTaskPolicy FunctionDef name:description arg:self arguments arg Return return:yes FunctionDef name:__call__ arg:self arg:shardable_tensors arguments arg arg Assign For Assign Assign Assign Assign Call Return return:yes 
Call" + }, + { + "library": "scipy", + "name": "_is_subdtype", + "source_code": "def _is_subdtype(dtype, dtypes):\n dtypes = dtypes if isinstance(dtypes, list) else [dtypes]\n mapping = {'i': np.integer, 'f': np.floating, 'c': np.complexfloating, 'n': np.number}\n dtypes = [mapping.get(x, x) for x in dtypes]\n return any((np.issubdtype(dtype, dt) for dt in dtypes))", + "docstring": "Shorthand for calculating whether dtype is subtype of some dtypes. Also allows specifying a list instead of just a single dtype. Additionaly, the most important supertypes from can optionally be specified using abbreviations as follows: \"i\": np.integer \"f\": np.floating \"c\": np.complexfloating \"n\": np.number (contains the other three)", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:_is_subdtype arg:dtype arg:dtypes arguments arg arg Assign Call Assign Assign Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n check_is_fitted(self)\n return self._loss.link.inverse(self._raw_predict(X).ravel())", + "docstring": "Predict values for X. Parameters ---------- X : array-like, shape (n_samples, n_features) The input samples. Returns ------- y : ndarray, shape (n_samples,) The predicted values.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "maybe_layout_constraints", + "source_code": "def maybe_layout_constraints(fn: Callable[..., Any]) -> Optional[Callable[..., Any]]:\n if not isinstance(fn, torch._ops.OpOverload):\n return None\n if fn in _maybe_layout_constraints:\n return _maybe_layout_constraints[fn]\n return None", + "docstring": "Get layout constraints. Returns None if there are no layout constraints.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\lowering.py", + "ast_data": "FunctionDef name:maybe_layout_constraints arg:fn arguments arg If Call Return return:no If Compare Return return:yes Return return:no" + }, + { + "library": "pytorch", + "name": "unregister_custom_op_symbolic", + "source_code": "def unregister_custom_op_symbolic(symbolic_name: str, opset_version: int):\n if symbolic_name.startswith('::'):\n symbolic_name = f'aten{symbolic_name}'\n _verify_custom_op_name(symbolic_name)\n registration.registry.unregister(symbolic_name, opset_version)", + "docstring": "Unregisters ``. See \"Custom Operators\" in the module documentation for an example usage. Args: symbolic_name (str): The name of the custom operator in \"::\" format. 
opset_version (int): The ONNX opset version in which to unregister.", + "type": "function", + "file_path": "pytorch\\torch\\onnx\\utils.py", + "ast_data": "FunctionDef name:unregister_custom_op_symbolic arg:symbolic_name arg:opset_version arguments arg arg If Call Assign Call Call" + }, + { + "library": "cherrypy", + "name": "graft", + "source_code": "def graft(self, wsgi_callable, script_name=''):\n script_name = script_name.rstrip('/')\n self.apps[script_name] = wsgi_callable", + "docstring": "Mount a wsgi callable at the given script_name.", + "type": "method", + "file_path": "cherrypy\\cherrypy\\_cptree.py", + "ast_data": "FunctionDef name:graft arg:self arg:wsgi_callable arg:script_name arguments arg arg arg Assign Call Assign" + }, + { + "library": "matplotlib", + "name": "get_offset", + "source_code": "def get_offset(self):\n return self._offset", + "docstring": "Return offset of the container.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", + "ast_data": "FunctionDef name:get_offset arg:self arguments arg Return return:yes" + }, + { + "library": "pandas", + "name": "get_chunk", + "source_code": "def get_chunk(self, size: int | None=None) -> DataFrame:\n if size is None:\n size = self._chunksize\n return self.read(nrows=size)", + "docstring": "Reads lines from Stata file and returns as dataframe Parameters ---------- size : int, defaults to None Number of lines to read. If None, reads whole file. Returns ------- DataFrame", + "type": "method", + "file_path": "pandas\\pandas\\io\\stata.py", + "ast_data": "FunctionDef name:get_chunk arg:self arg:size arguments arg arg If Compare Assign Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "OnRunStartResponse", + "source_code": "class OnRunStartResponse:\n\n def __init__(self, action, debug_urls, debug_ops='DebugIdentity', node_name_regex_allowlist=None, op_type_regex_allowlist=None, tensor_dtype_regex_allowlist=None, tolerate_debug_op_creation_failures=False):\n _check_type(action, str)\n self.action = action\n _check_type(debug_urls, list)\n self.debug_urls = debug_urls\n self.debug_ops = debug_ops\n self.node_name_regex_allowlist = node_name_regex_allowlist\n self.op_type_regex_allowlist = op_type_regex_allowlist\n self.tensor_dtype_regex_allowlist = tensor_dtype_regex_allowlist\n self.tolerate_debug_op_creation_failures = tolerate_debug_op_creation_failures", + "docstring": "Request from an on-run-start callback. The caller of the callback can use this response object to specify what action the debug-wrapper session actually takes on the run() call.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", + "ast_data": "ClassDef name:OnRunStartResponse FunctionDef name:__init__ arg:self arg:action arg:debug_urls arg:debug_ops arg:node_name_regex_allowlist arg:op_type_regex_allowlist arg:tensor_dtype_regex_allowlist arg:tolerate_debug_op_creation_failures arguments arg arg arg arg arg arg arg arg Call Assign Call Assign Assign Assign Assign Assign Assign" + }, + { + "library": "pandas", + "name": "_set_tz", + "source_code": "def _set_tz(values: npt.NDArray[np.int64], tz: str | tzinfo | None, datetime64_dtype: str) -> DatetimeArray:\n assert values.dtype == 'i8', values.dtype\n unit, _ = np.datetime_data(datetime64_dtype)\n dtype = tz_to_dtype(tz=tz, unit=unit)\n dta = DatetimeArray._from_sequence(values, dtype=dtype)\n return dta", + "docstring": "Coerce the values to a DatetimeArray with appropriate tz. 
Parameters ---------- values : ndarray[int64] tz : str, tzinfo, or None datetime64_dtype : str, e.g. \"datetime64[ns]\", \"datetime64[25s]\"", + "type": "function", + "file_path": "pandas\\pandas\\io\\pytables.py", + "ast_data": "FunctionDef name:_set_tz arg:values arg:tz arg:datetime64_dtype arguments arg arg arg Compare Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_remove_dropped_categories", + "source_code": "def _remove_dropped_categories(self, categories, i):\n if self._drop_idx_after_grouping is not None and self._drop_idx_after_grouping[i] is not None:\n return np.delete(categories, self._drop_idx_after_grouping[i])\n return categories", + "docstring": "Remove dropped categories.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py", + "ast_data": "FunctionDef name:_remove_dropped_categories arg:self arg:categories arg:i arguments arg arg arg If BoolOp Compare Compare Return return:yes Call Return return:yes" + }, + { + "library": "django", + "name": "formfield_for_manytomany", + "source_code": "def formfield_for_manytomany(self, db_field, request, **kwargs):\n if not db_field.remote_field.through._meta.auto_created:\n return None\n db = kwargs.get('using')\n if 'widget' not in kwargs:\n autocomplete_fields = self.get_autocomplete_fields(request)\n if db_field.name in autocomplete_fields:\n kwargs['widget'] = AutocompleteSelectMultiple(db_field, self.admin_site, using=db)\n elif db_field.name in self.raw_id_fields:\n kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.remote_field, self.admin_site, using=db)\n elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]:\n kwargs['widget'] = widgets.FilteredSelectMultiple(db_field.verbose_name, db_field.name in self.filter_vertical)\n if 'queryset' not in kwargs:\n queryset = self.get_field_queryset(db, db_field, request)\n if queryset is not None:\n kwargs['queryset'] = queryset\n form_field = db_field.formfield(**kwargs)\n if isinstance(form_field.widget, SelectMultiple) and form_field.widget.allow_multiple_selected and (not isinstance(form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple))):\n msg = _('Hold down “Control”, or “Command” on a Mac, to select more than one.')\n help_text = form_field.help_text\n form_field.help_text = format_lazy('{} {}', help_text, msg) if help_text else msg\n return form_field", + "docstring": "Get a form Field for a ManyToManyField.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\options.py", + "ast_data": "FunctionDef name:formfield_for_manytomany arg:self arg:db_field arg:request arguments arg arg arg arg If Return return:no Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Compare If Compare Assign Call If Compare Assign Assign Call If BoolOp Call Call Assign Call Assign Assign Call Return return:yes" + }, + { + "library": "numpy", + "name": "roots", + "source_code": "def roots(self):\n roots = self._roots(self.coef)\n return pu.mapdomain(roots, self.window, self.domain)", + "docstring": "Return the roots of the series polynomial. Compute the roots for the series. Note that the accuracy of the roots decreases the further outside the they lie. 
Returns ------- roots : ndarray Array containing the roots of the series.", + "type": "method", + "file_path": "numpy\\numpy\\polynomial\\_polybase.py", + "ast_data": "FunctionDef name:roots arg:self arguments arg Assign Call Return return:yes Call" + }, + { + "library": "scikit-learn", + "name": "bhtsne", + "source_code": "def bhtsne(X):\n n_iter = -1\n return (run_bh_tsne(X, use_pca=False, perplexity=args.perplexity, verbose=args.verbose > 0), n_iter)", + "docstring": "Wrapper for the reference lvdmaaten/bhtsne implementation.", + "type": "function", + "file_path": "scikit-learn\\benchmarks\\bench_tsne_mnist.py", + "ast_data": "FunctionDef name:bhtsne arg:X arguments arg Assign Return return:yes Call Compare" + }, + { + "library": "tensorflow", + "name": "_Conv2DBackpropInputGrad", + "source_code": "@ops.RegisterGradient('Conv2DBackpropInput')\ndef _Conv2DBackpropInputGrad(op: ops.Operation, grad):\n return [None, gen_nn_ops.conv2d_backprop_filter(grad, array_ops.shape(op.inputs[1]), op.inputs[2], dilations=op.get_attr('dilations'), strides=op.get_attr('strides'), padding=op.get_attr('padding'), explicit_paddings=op.get_attr('explicit_paddings'), use_cudnn_on_gpu=op.get_attr('use_cudnn_on_gpu'), data_format=op.get_attr('data_format').decode()), gen_nn_ops.conv2d(grad, op.inputs[1], dilations=op.get_attr('dilations'), strides=op.get_attr('strides'), padding=op.get_attr('padding'), explicit_paddings=op.get_attr('explicit_paddings'), use_cudnn_on_gpu=op.get_attr('use_cudnn_on_gpu'), data_format=op.get_attr('data_format').decode())]", + "docstring": "The derivatives for deconvolution. Args: op: the Deconvolution op. grad: the tensor representing the gradient w.r.t. the output Returns: the gradients w.r.t. the input and the filter", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py", + "ast_data": "FunctionDef name:_Conv2DBackpropInputGrad arg:op arg:grad arguments arg arg Return return:yes Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call Call" + }, + { + "library": "numpy", + "name": "_fix_int_lt_zero", + "source_code": "def _fix_int_lt_zero(x):\n x = asarray(x)\n if any(isreal(x) & (x < 0)):\n x = x * 1.0\n return x", + "docstring": "Convert to double if it has real, negative components. Otherwise, output is just the array version of the input (via asarray). 
Parameters ---------- x : array_like Returns ------- array Examples -------- >>> import numpy as np >>> np.lib.scimath._fix_int_lt_zero([1,2]) array([1, 2]) >>> np.lib.scimath._fix_int_lt_zero([-1,2]) array([-1., 2.])", + "type": "function", + "file_path": "numpy\\numpy\\lib\\_scimath_impl.py", + "ast_data": "FunctionDef name:_fix_int_lt_zero arg:x arguments arg Assign Call If Call Call Compare Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_collective_to_overlappable_nodes", + "source_code": "def _get_collective_to_overlappable_nodes(graph: torch.fx.Graph) -> dict[torch.fx.Node, list[torch.fx.Node]]:\n\n def is_collective(node) -> bool:\n return node.target in [torch.ops._c10d_functional.all_gather_into_tensor.default, torch.ops._c10d_functional.reduce_scatter_tensor.default]\n node_to_ancestors = _get_node_to_ancestors(graph)\n collective_to_overlappable_nodes = defaultdict(list)\n for node in graph.nodes:\n if not is_collective(node):\n continue\n for x in graph.nodes:\n if node not in node_to_ancestors[x] and x not in node_to_ancestors[node] and (x.op == 'call_function'):\n collective_to_overlappable_nodes[node].append(x)\n return collective_to_overlappable_nodes", + "docstring": "For each collective in the graph, find nodes that are neither ancestors nor descendants of the collective.", + "type": "function", + "file_path": "pytorch\\torch\\_inductor\\fx_passes\\micro_pipeline_tp.py", + "ast_data": "FunctionDef name:_get_collective_to_overlappable_nodes arg:graph arguments arg FunctionDef name:is_collective arg:node arguments arg Return return:yes Compare Assign Call Assign Call For If Call For If BoolOp Compare Compare Compare Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "is_simple_variable", + "source_code": "def is_simple_variable(self) -> bool:\n attributes = self.object_proto.attributes\n return len(attributes) == 1 and attributes[0].name == constants.VARIABLE_VALUE_KEY and (not self.object_proto.children)", + "docstring": "Determine whether this value is restorable with a Tensor initializer.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py", + "ast_data": "FunctionDef name:is_simple_variable arg:self arguments arg Assign Return return:yes BoolOp Compare Call Compare" + }, + { + "library": "pytorch", + "name": "guard_or_false", + "source_code": "def guard_or_false(a: BoolLikeType) -> bool:\n return _guard_or(a, False)", + "docstring": "Try to guard a, if data dependent error encountered just return false.", + "type": "function", + "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", + "ast_data": "FunctionDef name:guard_or_false arg:a arguments arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_WeakRefInfo", + "source_code": "class _WeakRefInfo:\n\n def __init__(self, size: int, element_size: int, device: torch.device, reftype: _RefType) -> None:\n self.size = size\n self.element_size = element_size\n self.reftype = reftype\n self.device = device\n self.mem_consumed = self._calculate_mem_consumed()\n\n def _calculate_mem_consumed(self) -> int:\n mem = self.size * self.element_size\n if self.device.type == 'cuda':\n return math.ceil(mem / _PYTORCH_MIN_ALLOCATE) * _PYTORCH_MIN_ALLOCATE\n return mem\n\n def update_mem_consumed(self, st: torch.UntypedStorage) -> int:\n if st.size() != self.size:\n self.size = st.size()\n self.mem_consumed = self._calculate_mem_consumed()\n return self.mem_consumed\n\n @classmethod\n def create_winfo(cls, st: 
torch.UntypedStorage, device: torch.device, reftype: _RefType, callback: Optional[Callable[[Self, weakref.ref], Any]]=None) -> tuple[Self, weakref.ref]:\n winfo = cls(st.size(), st.element_size(), device, reftype)\n w_st = weakref.ref(st, partial(callback, winfo) if callback else None)\n return (winfo, w_st)", + "docstring": "Manages memory statistics and device attributes for tensor storages.", + "type": "class", + "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py", + "ast_data": "ClassDef name:_WeakRefInfo FunctionDef name:__init__ arg:self arg:size arg:element_size arg:device arg:reftype arguments arg arg arg arg arg Assign Assign Assign Assign Assign Call FunctionDef name:_calculate_mem_consumed arg:self arguments arg Assign If Compare Return return:yes Call Return return:yes FunctionDef name:update_mem_consumed arg:self arg:st arguments arg arg If Compare Call Assign Call Assign Call Return return:yes FunctionDef name:create_winfo arg:cls arg:st arg:device arg:reftype arg:callback arguments arg arg arg arg arg Assign Call Call Call Assign Call Call Return return:yes" + }, + { + "library": "scipy", + "name": "lpn", + "source_code": "@_deprecated(__DEPRECATION_MSG_1_15.format('lpn', 'legendre_p_all'))\ndef lpn(n, z):\n return legendre_p_all(n, z, diff_n=1)", + "docstring": "Legendre function of the first kind. Compute sequence of Legendre functions of the first kind (polynomials), Pn(z) and derivatives for all degrees from 0 to n (inclusive). See also special.legendre for polynomial class. .. deprecated:: 1.15.0 This function is deprecated and will be removed in SciPy 1.17.0. Please use instead. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.", + "type": "function", + "file_path": "scipy\\scipy\\special\\_basic.py", + "ast_data": "FunctionDef name:lpn arg:n arg:z arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "skip", + "source_code": "def skip(self, delta):\n\n def update_fn(v):\n return self._skip_single_var(v, delta)\n if values_util.is_saving_non_distributed():\n return update_fn(self.state)\n if self._distribution_strategy is not None:\n with distribute_lib.enter_or_assert_strategy(self._distribution_strategy):\n if distribute_lib.in_cross_replica_context():\n values_util.mark_as_unsaveable()\n if distribute_lib.in_cross_replica_context() or 'CentralStorage' in type(self._distribution_strategy).__name__:\n return distribute_lib.get_strategy().extended.update(self.state, update_fn)\n return update_fn(self.state)", + "docstring": "Advance the counter of a counter-based RNG. Args: delta: the amount of advancement. The state of the RNG after will be the same as that after (or any other distribution). The actual increment added to the counter is an unspecified implementation detail. 
Returns: A of type .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", + "ast_data": "FunctionDef name:skip arg:self arg:delta arguments arg arg FunctionDef name:update_fn arg:v arguments arg Return return:yes Call If Call Return return:yes Call If Compare With Call If Call Call If BoolOp Call Compare Call Return return:yes Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "is_parametrized", + "source_code": "def is_parametrized(module: Module, tensor_name: Optional[str]=None) -> bool:\n parametrizations = getattr(module, 'parametrizations', None)\n if parametrizations is None or not isinstance(parametrizations, ModuleDict):\n return False\n if tensor_name is None:\n return len(parametrizations) > 0\n else:\n return tensor_name in parametrizations", + "docstring": "Determine if a module has a parametrization. Args: module (nn.Module): module to query tensor_name (str, optional): name of the parameter in the module Default: `moduletensor_nametensor_name`", + "type": "function", + "file_path": "pytorch\\torch\\nn\\utils\\parametrize.py", + "ast_data": "FunctionDef name:is_parametrized arg:module arg:tensor_name arguments arg arg Assign Call If BoolOp Compare Call Return return:yes If Compare Return return:yes Compare Call Return return:yes Compare" + }, + { + "library": "pytorch", + "name": "cosine_embedding_loss", + "source_code": "def cosine_embedding_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: float=0, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n if has_torch_function_variadic(input1, input2, target):\n return handle_torch_function(cosine_embedding_loss, (input1, input2, target), input1, input2, target, margin=margin, size_average=size_average, reduce=reduce, reduction=reduction)\n if size_average is not None or reduce is not None:\n reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n else:\n reduction_enum = _Reduction.get_enum(reduction)\n return torch.cosine_embedding_loss(input1, input2, target, margin, reduction_enum)", + "docstring": "Compute the cosine embedding loss. See :class: for details. Args: input1 (Tensor): Predicted values. input2 (Tensor): Predicted values. target (Tensor): Ground truth values. margin (float, optional): Margin for cosine embedding. Has a default value of 0. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. 
Returns: Tensor: Cosine embedding loss.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\functional.py", + "ast_data": "FunctionDef name:cosine_embedding_loss arg:input1 arg:input2 arg:target arg:margin arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, variant_tensor, resource_creator):\n super(_VariantTracker, self).__init__(device='CPU')\n self._resource_handle = variant_tensor\n if not isinstance(resource_creator, def_function.Function):\n raise TypeError('Resource creator should already be a tf.function.')\n self._create_resource = resource_creator", + "docstring": "Record that is associated with . Args: variant_tensor: The variant-dtype Tensor associated with the Dataset. This Tensor will be a captured input to functions which use the Dataset, and is used by saving code to identify the corresponding _VariantTracker. resource_creator: A zero-argument function which creates a new variant-dtype Tensor. This function will be included in SavedModels and run to re-create the Dataset's variant Tensor on restore.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:variant_tensor arg:resource_creator arguments arg arg arg Call Call Assign If Call Raise Call Assign" + }, + { + "library": "tensorflow", + "name": "check_with_golden", + "source_code": "def check_with_golden(filename):\n path_to_file = PATH_TO_DIR + '/data/' + filename\n if os.path.isfile(path_to_file) and os.path.isfile(CUDA_CC_GOLDEN_DIR):\n with open(path_to_file, 'r') as f_new:\n with open(CUDA_CC_GOLDEN_DIR, 'r') as f_golden:\n diff = difflib.unified_diff(f_new.readlines(), f_golden.readlines(), fromfile=path_to_file, tofile=CUDA_CC_GOLDEN_DIR)\n diff_list = []\n for line in diff:\n diff_list.append(line)\n if diff_list:\n print('WARNING: difference(s) found between new csv and golden csv.')\n print(diff_list)\n else:\n print('No difference found between new csv and golen csv.')", + "docstring": "Checks the newly created CUDA compute capability file with the golden. If differences are found, then it prints a list of all mismatches as a . Golden file must reside in directory. 
Args: filename: String that is the name of the newly created file.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\data\\cuda_compute_capability.py", + "ast_data": "FunctionDef name:check_with_golden arg:filename arguments arg Assign If BoolOp Call Call With Call With Call Assign Call Call Call Assign For Call If Call Call Call" + }, + { + "library": "kornia", + "name": "forward", + "source_code": "def forward(self, points: Optional[tuple[Tensor, Tensor]], boxes: Optional[Tensor], masks: Optional[Tensor]) -> tuple[Tensor, Tensor]:\n bs = self._get_batch_size(points, boxes, masks)\n sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())\n if points is not None:\n coords, labels = points\n point_embeddings = self._embed_points(coords, labels, pad=boxes is None)\n sparse_embeddings = concatenate([sparse_embeddings, point_embeddings], dim=1)\n if boxes is not None:\n box_embeddings = self._embed_boxes(boxes)\n sparse_embeddings = concatenate([sparse_embeddings, box_embeddings], dim=1)\n if masks is not None:\n dense_embeddings = self._embed_masks(masks)\n else:\n dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(bs, -1, self.image_embedding_size[0], self.image_embedding_size[1])\n return (sparse_embeddings, dense_embeddings)", + "docstring": "Embeds different types of prompts, returning both sparse and dense embeddings. Args: points: point coordinates and labels to embed. boxes: boxes to embed masks: masks to embed Returns: - sparse embeddings for the points and boxes, with shape BxNx(embed_dim), where N is determined by the number of input points and boxes. - dense embeddings for the masks, in the shape Bx(embed_dim)x(embed_H)x(embed_W)", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\prompt_encoder.py", + "ast_data": "FunctionDef name:forward arg:self arg:points arg:boxes arg:masks arguments arg arg arg arg Assign Call Assign Call Call If Compare Assign Assign Call Compare Assign Call If Compare Assign Call Assign Call If Compare Assign Call Assign Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "execution_to_tensor_values", + "source_code": "def execution_to_tensor_values(self, execution):\n debug_event = self._reader.read_execution_event(execution.locator)\n return [_parse_tensor_value(tensor_proto) for tensor_proto in debug_event.execution.tensor_protos]", + "docstring": "Read the full tensor values from an Execution or ExecutionDigest. Args: execution: An or object. Returns: A list of numpy arrays representing the output tensor values of the execution event.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", + "ast_data": "FunctionDef name:execution_to_tensor_values arg:self arg:execution arguments arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "do_quantize_training_on_graphdef", + "source_code": "@deprecation.deprecated(None, 'GraphDef quantized training rewriter is deprecated in the long term.')\n@tf_export(v1=['train.do_quantize_training_on_graphdef'])\ndef do_quantize_training_on_graphdef(input_graph, num_bits):\n graph = graph_pb2.GraphDef()\n result_graph_string = DoQuantizeTrainingOnGraphDefHelper(input_graph.SerializeToString(), num_bits)\n graph.ParseFromString(result_graph_string)\n return graph", + "docstring": "A general quantization scheme is being developed in . 
Consider using that instead, though since it is in the tf.contrib namespace, it is not subject to backward compatibility guarantees. Args: input_graph: A . num_bits: The number of bits for quantize training. Returns: The graph with quantize training done.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\quantize_training.py", + "ast_data": "FunctionDef name:do_quantize_training_on_graphdef arg:input_graph arg:num_bits arguments arg arg Assign Call Assign Call Call Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "validate_saveables_for_saved_model", + "source_code": "def validate_saveables_for_saved_model(saveables, obj):\n if isinstance(obj, python_state.PythonState):\n logging.warn(f'Note that object {obj} stores python values into the checkpoint. These values will not be restored when loading the SavedModel into python.')\n return []\n if any((isinstance(saveable, trackable.NoRestoreSaveable) for saveable in saveables)):\n return []\n return saveables", + "docstring": "Makes sure SaveableObjects are compatible with SavedModel.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", + "ast_data": "FunctionDef name:validate_saveables_for_saved_model arg:saveables arg:obj arguments arg arg If Call Call Return return:no If Call Call Return return:no Return return:yes" + }, + { + "library": "tensorflow", + "name": "decode", + "source_code": "@abc.abstractmethod\ndef decode(self, spec, encoded_value):\n raise NotImplementedError(f'{type(self).__name__}.decode')", + "docstring": "Decodes from a batchable tensor encoding. Args: spec: The TypeSpec for the result value. If encoded values with spec were batched, then should be ; or if encoded values with spec were unbatched, then should be . encoded_value: A nest of values returned by ; or a nest of values that was formed by stacking, unstacking, or concatenating the corresponding elements of values returned by . Returns: A value compatible with .", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", + "ast_data": "FunctionDef name:decode arg:self arg:spec arg:encoded_value arguments arg arg arg Raise Call Call" + }, + { + "library": "tensorflow", + "name": "num_embedding_devices_per_chip", + "source_code": "@property\ndef num_embedding_devices_per_chip(self):\n return self.tpu_hardware_feature_proto.num_embedding_devices_per_chip", + "docstring": "Number of embedding accelerator devices per chip. 
Returns: Number of embedding devices per chip.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_hardware_feature.py", + "ast_data": "FunctionDef name:num_embedding_devices_per_chip arg:self arguments arg Return return:yes" + }, + { + "library": "django", + "name": "generate_renamed_fields", + "source_code": "def generate_renamed_fields(self):\n for rem_app_label, rem_model_name, rem_db_column, rem_field_name, app_label, model_name, field, field_name in self.renamed_operations:\n if rem_db_column != field.db_column:\n altered_field = field.clone()\n altered_field.name = rem_field_name\n self.add_operation(app_label, operations.AlterField(model_name=model_name, name=rem_field_name, field=altered_field))\n self.add_operation(app_label, operations.RenameField(model_name=model_name, old_name=rem_field_name, new_name=field_name))\n self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))\n self.old_field_keys.add((app_label, model_name, field_name))", + "docstring": "Generate RenameField operations.", + "type": "method", + "file_path": "django\\django\\db\\migrations\\autodetector.py", + "ast_data": "FunctionDef name:generate_renamed_fields arg:self arguments arg For If Compare Assign Call Assign Call Call Call Call Call Call" + }, + { + "library": "pytorch", + "name": "TensorWeakRef", + "source_code": "class TensorWeakRef:\n ref: WeakRef[Tensor]\n\n def __init__(self, tensor: Tensor):\n assert isinstance(tensor, Tensor)\n self.ref = weakref.ref(tensor)\n\n def __call__(self):\n out = self.ref()\n if out is None:\n return out\n assert isinstance(out, Tensor)\n out._fix_weakref()\n return out", + "docstring": "Wrapper around a weak ref of a Tensor that handles the _fix_weakref() call required when unwrapping a Tensor weakref.", + "type": "class", + "file_path": "pytorch\\torch\\utils\\weak.py", + "ast_data": "ClassDef name:TensorWeakRef FunctionDef name:__init__ arg:self arg:tensor arguments arg arg Call Assign Call FunctionDef name:__call__ arg:self arguments arg Assign Call If Compare Return return:yes Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_warn_mkl_vcomp", + "source_code": "def _warn_mkl_vcomp(self, n_active_threads):\n warnings.warn(f'MiniBatchKMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can prevent it by setting batch_size >= {self._n_threads * CHUNK_SIZE} or by setting the environment variable OMP_NUM_THREADS={n_active_threads}')", + "docstring": "Warn when vcomp and mkl are both present", + "type": "method", + "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py", + "ast_data": "FunctionDef name:_warn_mkl_vcomp arg:self arg:n_active_threads arguments arg arg Call" + }, + { + "library": "pytorch", + "name": "safe_globals", + "source_code": "class safe_globals(_weights_only_unpickler._safe_globals):\n pass", + "docstring": "Context-manager that adds certain globals as safe for `torch.load(f.name, weights_only=True)` will fail with # Unsupported global: GLOBAL __main__.MyTensor was not an allowed global by default. # Check the code and make sure MyTensor is safe to be used when loaded from an arbitrary checkpoint. ... with torch.serialization.safe_globals([MyTensor]): ... 
torch.load(f.name, weights_only=True) # MyTensor([[-0.5024, -1.8152, -0.5455], # [-0.8234, 2.0500, -0.3657]]) >>> assert torch.serialization.get_safe_globals() == []", + "type": "class", + "file_path": "pytorch\\torch\\serialization.py", + "ast_data": "ClassDef name:safe_globals" + }, + { + "library": "scipy", + "name": "Levy05", + "source_code": "class Levy05(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])\n self.global_optimum = [[-1.30685, -1.42485]]\n self.fglob = -176.1375779\n\n def fun(self, x, *args):\n self.nfev += 1\n i = arange(1, 6)\n a = i * cos((i - 1) * x[0] + i)\n b = i * cos((i + 1) * x[1] + i)\n return sum(a) * sum(b) + (x[0] + 1.42513) ** 2 + (x[1] + 0.80032) ** 2", + "docstring": "Levy 5 objective function. This class defines the Levy 5 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Levy05}}(\\mathbf{x}) = \\sum_{i=1}^{5} i \\cos \\left[(i-1)x_1 + i \\right] \\times \\sum_{j=1}^{5} j \\cos \\left[(j+1)x_2 + j \\right] + (x_1 + 1.42513)^2 + (x_2 + 0.80032)^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math:. .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_L.py", + "ast_data": "ClassDef name:Levy05 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "numpy_text", + "source_code": "def numpy_text(tensor, is_repr=False):\n if tensor.dtype.is_numpy_compatible:\n text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())\n else:\n text = ''\n if '\\n' in text:\n text = '\\n' + text\n return text", + "docstring": "Human readable representation of a tensor's numpy value.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py", + "ast_data": "FunctionDef name:numpy_text arg:tensor arg:is_repr arguments arg arg If Assign Call Call Call Call Assign If Compare Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "_validate_inputs", + "source_code": "@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.VALIDATE_INPUTS)\ndef _validate_inputs(self, input_tensors, quantized_input_stats):\n if not self._is_unknown_shapes_allowed() and self._has_valid_tensors():\n for tensor in input_tensors:\n shape = tensor.shape\n if not shape:\n raise ValueError(\"Provide an input shape for input array '{0}'.\".format(_get_tensor_name(tensor)))\n shape_list = shape.as_list()\n if None in shape_list[1:]:\n raise ValueError(\"None is only supported in the 1st dimension. 
Tensor '{0}' has invalid shape '{1}'.\".format(_get_tensor_name(tensor), shape_list))\n elif shape_list and shape_list[0] is None:\n self._set_batch_size(batch_size=1)\n if quantized_input_stats:\n self._quantized_stats = []\n invalid_stats = []\n for name in self.get_input_arrays():\n if name in quantized_input_stats:\n self._quantized_stats.append(quantized_input_stats[name])\n else:\n invalid_stats.append(name)\n if invalid_stats:\n raise ValueError(\"Quantization input stats are not available for input tensors '{0}'.\".format(','.join(invalid_stats)))\n else:\n self._quantized_stats = None", + "docstring": "Validate input parameters. Args: input_tensors: List of input tensors. quantized_input_stats: Map of input tensor names to a tuple of floats representing the mean and standard deviation of the training data. Raises: ValueError: Input shape is not specified. Quantization input stats is required but not provided.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", + "ast_data": "FunctionDef name:_validate_inputs arg:self arg:input_tensors arg:quantized_input_stats arguments arg arg arg If BoolOp Call Call For Assign If Raise Call Call Call Assign Call If Compare Raise Call Call Call If BoolOp Compare Call If Assign Assign For Call If Compare Call Call If Raise Call Call Call Assign Call" + }, + { + "library": "tensorflow", + "name": "set_caching_device", + "source_code": "def set_caching_device(self, caching_device):\n if context.executing_eagerly():\n raise NotImplementedError('Caching devices are not yet supported when eager execution is enabled.')\n self._caching_device = caching_device", + "docstring": "Set caching_device for this scope.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py", + "ast_data": "FunctionDef name:set_caching_device arg:self arg:caching_device arguments arg arg If Call Raise Call Assign" + }, + { + "library": "scrapy", + "name": "_dumps", + "source_code": "def _dumps(self, data: Any) -> str:\n return json.dumps(data, **self._dumps_kwargs)", + "docstring": "Convert to JSON", + "type": "method", + "file_path": "scrapy\\scrapy\\http\\request\\json_request.py", + "ast_data": "FunctionDef name:_dumps arg:self arg:data arguments arg arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "GrouperView", + "source_code": "class GrouperView:\n\n def __init__(self, grouper):\n self._grouper = grouper\n\n def __contains__(self, item):\n return item in self._grouper\n\n def __iter__(self):\n return iter(self._grouper)\n\n def joined(self, a, b):\n return self._grouper.joined(a, b)\n\n def get_siblings(self, a):\n return self._grouper.get_siblings(a)", + "docstring": "Immutable view over a .", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", + "ast_data": "ClassDef name:GrouperView FunctionDef name:__init__ arg:self arg:grouper arguments arg arg Assign FunctionDef name:__contains__ arg:self arg:item arguments arg arg Return return:yes Compare FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:joined arg:self arg:a arg:b arguments arg arg arg Return return:yes Call FunctionDef name:get_siblings arg:self arg:a arguments arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_variational_recurrent_dropout_value", + "source_code": "def _variational_recurrent_dropout_value(self, unused_index, value, noise, keep_prob):\n random_tensor = keep_prob + noise\n binary_tensor = 
math_ops.floor(random_tensor)\n ret = math_ops.divide(value, keep_prob) * binary_tensor\n ret.set_shape(value.get_shape())\n return ret", + "docstring": "Performs dropout given the pre-calculated noise tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_wrapper_impl.py", + "ast_data": "FunctionDef name:_variational_recurrent_dropout_value arg:self arg:unused_index arg:value arg:noise arg:keep_prob arguments arg arg arg arg arg Assign Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "skip_data", + "source_code": "class skip_data:\n\n def __init__(self, materialize_fake_tensors: bool=False):\n self.materialize_fake_tensors = materialize_fake_tensors\n\n def __enter__(self):\n global _serialization_tls\n self._old_skip_data = _serialization_tls.skip_data\n self._old_materialize_fake_tensors = _serialization_tls.materialize_fake_tensors\n _serialization_tls.skip_data = True\n _serialization_tls.materialize_fake_tensors = self.materialize_fake_tensors\n\n def __exit__(self, type, value, tb):\n global _serialization_tls\n _serialization_tls.skip_data = self._old_skip_data\n _serialization_tls.materialize_fake_tensors = self._old_materialize_fake_tensors", + "docstring": "Context-manager that skips writing/reading storage bytes for `` context manager is an early prototype and is subject to change. Args: materialize_fake_tensors: Whether to materialize FakeTensors during save. This is a no-op for the load path. Example: >>> # xdoctest: +SKIP(\"NamedTemporaryFile on Windows\") >>> import tempfile >>> t = torch.randn(2, 3) >>> with tempfile.NamedTemporaryFile() as f: ... with torch.serialization.skip_data(): ... torch.save(t, f.name) ... torch.load(f.name, weights_only=True) tensor([[0., 0., 0.], [0., 0., 0.]])", + "type": "class", + "file_path": "pytorch\\torch\\serialization.py", + "ast_data": "ClassDef name:skip_data FunctionDef name:__init__ arg:self arg:materialize_fake_tensors arguments arg arg Assign FunctionDef name:__enter__ arg:self arguments arg Assign Assign Assign Assign FunctionDef name:__exit__ arg:self arg:type arg:value arg:tb arguments arg arg arg arg Assign Assign" + }, + { + "library": "scikit-learn", + "name": "weight_intercept", + "source_code": "def weight_intercept(self, coef):\n if not self.base_loss.is_multiclass:\n if self.fit_intercept:\n intercept = coef[-1]\n weights = coef[:-1]\n else:\n intercept = 0.0\n weights = coef\n else:\n if coef.ndim == 1:\n weights = coef.reshape((self.base_loss.n_classes, -1), order='F')\n else:\n weights = coef\n if self.fit_intercept:\n intercept = weights[:, -1]\n weights = weights[:, :-1]\n else:\n intercept = 0.0\n return (weights, intercept)", + "docstring": "Helper function to get coefficients and intercept. Parameters ---------- coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) Coefficients of a linear model. If shape (n_classes * n_dof,), the classes of one feature are contiguous, i.e. one reconstructs the 2d-array via coef.reshape((n_classes, -1), order=\"F\"). Returns ------- weights : ndarray of shape (n_features,) or (n_classes, n_features) Coefficients without intercept term. 
intercept : float or ndarray of shape (n_classes,) Intercept terms.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_linear_loss.py", + "ast_data": "FunctionDef name:weight_intercept arg:self arg:coef arguments arg arg If If Assign Assign Assign Assign If Compare Assign Call Assign If Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_legacy_output_shapes", + "source_code": "@tf_export(v1=['data.get_output_shapes'])\ndef get_legacy_output_shapes(dataset_or_iterator):\n return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), get_structure(dataset_or_iterator))", + "docstring": "Returns the output shapes for elements of the input dataset / iterator. Args: dataset_or_iterator: A or . Returns: A (nested) structure of objects matching the structure of the dataset / iterator elements and specifying the shape of the individual components. @compatibility(TF2) This is a legacy API for inspecting the type signature of dataset elements. In TF 2, you should use the attribute instead. @end_compatibility", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", + "ast_data": "FunctionDef name:get_legacy_output_shapes arg:dataset_or_iterator arguments arg Return return:yes Call arguments arg Call Call Call" + }, + { + "library": "django", + "name": "camel_case_to_spaces", + "source_code": "def camel_case_to_spaces(value):\n return re_camel_case.sub(' \\\\1', value).strip().lower()", + "docstring": "Split CamelCase and convert to lowercase. Strip surrounding whitespace.", + "type": "function", + "file_path": "django\\django\\utils\\text.py", + "ast_data": "FunctionDef name:camel_case_to_spaces arg:value arguments arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "_find_originating_frame", + "source_code": "def _find_originating_frame(caller_fn_scope, innermost=True):\n ctx_frame = inspect.currentframe()\n result = None\n while ctx_frame is not None:\n if ctx_frame.f_locals.get(caller_fn_scope.name, None) is caller_fn_scope:\n result = ctx_frame\n if innermost:\n break\n ctx_frame = ctx_frame.f_back\n assert result is not None, 'the conversion process should ensure the caller_fn_scope is always found somewhere on the call stack'\n return result", + "docstring": "Locates the frame in which was defined.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\py_builtins.py", + "ast_data": "FunctionDef name:_find_originating_frame arg:caller_fn_scope arg:innermost arguments arg arg Assign Call Assign While Compare If Compare Call Assign If Assign Compare Return return:yes" + }, + { + "library": "pytorch", + "name": "choose_qparams_symmetric_tensor", + "source_code": "@impl(quantized_decomposed_lib, 'choose_qparams_symmetric.tensor', 'CompositeExplicitAutograd')\ndef choose_qparams_symmetric_tensor(input: torch.Tensor, qmin: int, qmax: int, eps: float, dtype: torch.dtype) -> tuple[torch.Tensor, torch.Tensor]:\n assert input.dtype in [torch.float32, torch.float16, torch.bfloat16], f'Expecting input to have dtype torch.float32/16/b16, but got dtype: {input.dtype}'\n assert dtype in _DTYPE_TO_QVALUE_BOUNDS, f'Expecting target dtype to be one of {_DTYPE_TO_QVALUE_BOUNDS.keys()}, but got: {dtype}'\n validate_qmin_qmax(qmin, qmax)\n min_val, max_val = torch.aminmax(input)\n return determine_qparams(min_val, max_val, qmin, qmax, dtype, torch.Tensor([eps]), has_customized_qrange=False, 
qscheme=torch.per_tensor_symmetric)", + "docstring": "Given an input Tensor, derive the per tensor affine quantization parameter (scale and zero_point) for target quantized Tensor from the Tensor Args: input (torch.Tensor): floating point input Tensor quant_min (int): minimum quantized value for target quantized Tensor quant_max (int): maximum quantized value for target quantized Tensor dtype (torch.dtype): dtype for target quantized Tensor Returns: scale (float): quantization parameter for the target quantized Tensor zero_point (int): quantization parameter for the target quantized Tensor", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py", + "ast_data": "FunctionDef name:choose_qparams_symmetric_tensor arg:input arg:qmin arg:qmax arg:eps arg:dtype arguments arg arg arg arg arg Compare Compare Call Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "pytorch", + "name": "stft", + "source_code": "def stft(self, n_fft: int, hop_length: Optional[int]=None, win_length: Optional[int]=None, window: 'Optional[Tensor]'=None, center: bool=True, pad_mode: str='reflect', normalized: bool=False, onesided: Optional[bool]=None, return_complex: Optional[bool]=None, align_to_window: Optional[bool]=None):\n if has_torch_function_unary(self):\n return handle_torch_function(Tensor.stft, (self,), self, n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode, normalized=normalized, onesided=onesided, return_complex=return_complex, align_to_window=align_to_window)\n return torch.stft(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex=return_complex, align_to_window=align_to_window)", + "docstring": "See :func: .. warning:: This function changed signature at version 0.4.1. Calling with the previous signature may cause error or return incorrect result.", + "type": "method", + "file_path": "pytorch\\torch\\_tensor.py", + "ast_data": "FunctionDef name:stft arg:self arg:n_fft arg:hop_length arg:win_length arg:window arg:center arg:pad_mode arg:normalized arg:onesided arg:return_complex arg:align_to_window arguments arg arg arg arg arg arg arg arg arg arg arg If Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "extract", + "source_code": "@abc.abstractmethod\ndef extract(self, accumulator):\n pass", + "docstring": "Convert an accumulator into a dict of output values. Args: accumulator: The accumulator to convert. Returns: A dict of ndarrays representing the data in this accumulator.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py", + "ast_data": "FunctionDef name:extract arg:self arg:accumulator arguments arg arg" + }, + { + "library": "matplotlib", + "name": "inferno", + "source_code": "def inferno() -> None:\n set_cmap('inferno')", + "docstring": "Set the colormap to 'inferno'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.", + "type": "function", + "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", + "ast_data": "FunctionDef name:inferno arguments Call" + }, + { + "library": "matplotlib", + "name": "get_patch_transform", + "source_code": "def get_patch_transform(self):\n return transforms.IdentityTransform()", + "docstring": "Return the instance mapping patch coordinates to data coordinates. 
For example, one may define a patch of a circle which represents a radius of 5 by providing coordinates for a unit circle, and a transform which scales the coordinates (the patch coordinate) by 5.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\patches.py", + "ast_data": "FunctionDef name:get_patch_transform arg:self arguments arg Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "set_minor_threshold", + "source_code": "def set_minor_threshold(self, minor_threshold):\n self._minor_threshold = minor_threshold", + "docstring": "Set the threshold for labelling minors ticks. Parameters ---------- minor_threshold : int Maximum number of locations for labelling some minor ticks. This parameter have no effect if minor is False.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", + "ast_data": "FunctionDef name:set_minor_threshold arg:self arg:minor_threshold arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "_type_spec_from_value", + "source_code": "def _type_spec_from_value(value):\n if tf_utils.is_extension_type(value):\n return value._type_spec\n if hasattr(value, 'shape') and hasattr(value, 'dtype'):\n return tensor_spec.TensorSpec(value.shape, value.dtype)\n else:\n return type_spec.type_spec_from_value(value)", + "docstring": "Grab type_spec without converting array-likes to tensors.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py", + "ast_data": "FunctionDef name:_type_spec_from_value arg:value arguments arg If Call Return return:yes If BoolOp Call Call Return return:yes Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "get_header", + "source_code": "def get_header(graphs, proto_fileformat='rawproto', default_ops='NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'):\n ops_and_kernels = get_ops_and_kernels(proto_fileformat, graphs, default_ops)\n if not ops_and_kernels:\n print('Error reading graph!')\n return 1\n return get_header_from_ops_and_kernels(ops_and_kernels, default_ops == 'all')", + "docstring": "Computes a header for use with tensorflow SELECTIVE_REGISTRATION. Args: graphs: a list of paths to GraphDef files to include. proto_fileformat: optional format of proto file, either 'textproto', 'rawproto' (default) or ops_list. The ops_list is the file contain the list of ops in JSON format, Ex: \"[[\"Transpose\", \"TransposeCpuOp\"]]\". default_ops: optional comma-separated string of operator:kernel pairs to always include implementation for. Pass 'all' to have all operators and kernels included. Default: 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'. 
Returns: the string of the header that should be written as ops_to_register.h.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\selective_registration_header_lib.py", + "ast_data": "FunctionDef name:get_header arg:graphs arg:proto_fileformat arg:default_ops arguments arg arg arg Assign Call If Call Return return:yes Return return:yes Call Compare" + }, + { + "library": "pytorch", + "name": "_replace_unbacked_bindings", + "source_code": "def _replace_unbacked_bindings(gm: torch.fx.GraphModule) -> None:\n from torch._export.utils import _get_shape_env_from_gm\n from torch.fx.experimental.symbolic_shapes import _free_unbacked_symbols_with_path\n from torch.utils._sympy.symbol import symbol_is_type, SymT\n if (shape_env := _get_shape_env_from_gm(gm)) is None:\n return\n base_unbacked_symbols = {symbol for symbol in shape_env.var_to_range if symbol_is_type(symbol, (SymT.UNBACKED_INT, SymT.UNBACKED_FLOAT)) and symbol not in shape_env.unbacked_renamings}\n for node in gm.graph.nodes:\n node.meta.pop('unbacked_bindings', None)\n if (val := node.meta.get('val')) is not None and (unbacked_bindings := _free_unbacked_symbols_with_path(val, (), shape_env=shape_env, pending=base_unbacked_symbols, simplify=True)):\n node.meta['unbacked_bindings'] = unbacked_bindings", + "docstring": "When we run an interpreter-based pass over a GraphModule, execution of data-dependent operators will produce example values with new unbacked symbols. To track that the new/old symbols are equivalent, we used to rely on the unbacked_renamings mapping. This led to problematic metadata where the unbacked_bindings keys mapped new symbols (u2) to paths containing old symbols (u0) in the example values, or worse, backed symbols or constants (e.g. if the original unbacked was replaced/specialized). Additionally this created problems with de/serialized programs, since we didn't comprehensively serialize ShapeEnv/unbacked renamings/node bindings. This pass attempts a simpler way of handling these for export, by throwing away the previously computed bindings, and re-running the pattern match used in compute_unbacked_bindings. This ensures we keep the original symbols contained in the example values, or delete bindings if they've been replaced/specialized.", + "type": "function", + "file_path": "pytorch\\torch\\export\\_trace.py", + "ast_data": "FunctionDef name:_replace_unbacked_bindings arg:gm arguments arg If Compare Call Return return:no Assign BoolOp Call Compare For Call If BoolOp Compare Call Call Assign" + }, + { + "library": "pytorch", + "name": "long", + "source_code": "def long(self):\n _warn_typed_storage_removal()\n return self._to(torch.long)", + "docstring": "Casts this storage to long type.", + "type": "method", + "file_path": "pytorch\\torch\\storage.py", + "ast_data": "FunctionDef name:long arg:self arguments arg Call Return return:yes Call" + }, + { + "library": "scipy", + "name": "eqp_kktfact", + "source_code": "def eqp_kktfact(H, c, A, b):\n n, = np.shape(c)\n m, = np.shape(b)\n kkt_matrix = block_array([[H, A.T], [A, None]], format='csc')\n kkt_vec = np.hstack([-c, -b])\n lu = linalg.splu(kkt_matrix)\n kkt_sol = lu.solve(kkt_vec)\n x = kkt_sol[:n]\n lagrange_multipliers = -kkt_sol[n:n + m]\n return (x, lagrange_multipliers)", + "docstring": "Solve equality-constrained quadratic programming (EQP) problem. Solve `` using direct factorization of the KKT system. Parameters ---------- H : sparse array, shape (n, n) Hessian matrix of the EQP problem. 
c : array_like, shape (n,) Gradient of the quadratic objective function. A : sparse array Jacobian matrix of the EQP problem. b : array_like, shape (m,) Right-hand side of the constraint equation. Returns ------- x : array_like, shape (n,) Solution of the KKT problem. lagrange_multipliers : ndarray, shape (m,) Lagrange multipliers of the KKT problem.", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\qp_subproblem.py", + "ast_data": "FunctionDef name:eqp_kktfact arg:H arg:c arg:A arg:b arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "barrier", + "source_code": "def barrier(self) -> None:\n if not self.use_dist:\n return\n dist.barrier(group=self.group)", + "docstring": "Add a synchronization point across all processes when using distributed. If torch.distributed is initialized, this function will invoke a barrier across the global process group. If torch.distributed is not initialized, this function is a no-op.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\checkpoint\\utils.py", + "ast_data": "FunctionDef name:barrier arg:self arguments arg If Return return:no Call" + }, + { + "library": "tensorflow", + "name": "get_preinitialized_function_spec", + "source_code": "def get_preinitialized_function_spec(concrete_function):\n if concrete_function.structured_input_signature is None or isinstance(concrete_function, wrap_function_lib.WrappedFunction):\n return None\n function_type = concrete_function.function_type\n if function_type is None:\n return None\n unconstrained_type = function_type_lib.FunctionType([function_type_lib.Parameter(p.name, p.kind, p.optional, None) for p in function_type.parameters.values()])\n default_values = {p.default for p in function_type.parameters.values() if p.optional}\n return function_type_utils.FunctionSpec(unconstrained_type, default_values, False, name=concrete_function.name)", + "docstring": "Generates an unconstrained FunctionSpec from FunctionType.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_serialization.py", + "ast_data": "FunctionDef name:get_preinitialized_function_spec arg:concrete_function arguments arg If BoolOp Compare Call Return return:no Assign If Compare Return return:no Assign Call Call Call Assign Call Return return:yes Call" + }, + { + "library": "kornia", + "name": "cy", + "source_code": "@property\ndef cy(self) -> Tensor:\n return self._params[..., 3]", + "docstring": "Returns the principal point in y direction.", + "type": "method", + "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py", + "ast_data": "FunctionDef name:cy arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "get_tensors", + "source_code": "def get_tensors(self, names: Iterable[str]) -> list[torch.Tensor]:\n return [self.get_tensor(name) for name in names]", + "docstring": "Get the tensors specified by the given paths. 
For example, to get the attributes mod.layer1.conv1.weight and mod.layer1.conv1.bias, use accessor.get_tensors([\"layer1.conv1.weight\", \"layer1.conv1.bias\"])", + "type": "method", + "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py", + "ast_data": "FunctionDef name:get_tensors arg:self arg:names arguments arg arg Return return:yes Call" + }, + { + "library": "scipy", + "name": "_to_corr", + "source_code": "def _to_corr(self, m):\n if not (m.flags.c_contiguous and m.dtype == np.float64 and (m.shape[0] == m.shape[1])):\n raise ValueError()\n d = m.shape[0]\n for i in range(d - 1):\n if m[i, i] == 1:\n continue\n elif m[i, i] > 1:\n for j in range(i + 1, d):\n if m[j, j] < 1:\n break\n else:\n for j in range(i + 1, d):\n if m[j, j] > 1:\n break\n c, s = self._givens_to_1(m[i, i], m[j, j], m[i, j])\n mv = m.ravel()\n drot(mv, mv, c, -s, n=d, offx=i * d, incx=1, offy=j * d, incy=1, overwrite_x=True, overwrite_y=True)\n drot(mv, mv, c, -s, n=d, offx=i, incx=d, offy=j, incy=d, overwrite_x=True, overwrite_y=True)\n return m", + "docstring": "Given a psd matrix m, rotate to put one's on the diagonal, turning it into a correlation matrix. This also requires the trace equal the dimensionality. Note: modifies input matrix", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_multivariate.py", + "ast_data": "FunctionDef name:_to_corr arg:self arg:m arguments arg arg If BoolOp Compare Compare Raise Call Assign For Call If Compare If Compare For Call If Compare For Call If Compare Assign Call Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n self._validate_kwargs(kwargs, support_partition=self.support_partition)\n if dtype is not None:\n dtype = dtypes.as_dtype(dtype)\n return constant_op.constant(self.value, dtype=dtype, shape=shape)", + "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. If not provided the dtype of the tensor created will be the type of the inital value. **kwargs: Additional keyword arguments. Raises: TypeError: If the initializer cannot create a tensor of the requested dtype.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call If Compare Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "dirac_", + "source_code": "def dirac_(tensor, groups=1):\n dimensions = tensor.ndimension()\n if dimensions not in [3, 4, 5]:\n raise ValueError('Only tensors with 3, 4, or 5 dimensions are supported')\n sizes = tensor.size()\n if sizes[0] % groups != 0:\n raise ValueError('dim 0 must be divisible by groups')\n out_chans_per_grp = sizes[0] // groups\n min_dim = min(out_chans_per_grp, sizes[1])\n with torch.no_grad():\n tensor.zero_()\n for g in range(groups):\n for d in range(min_dim):\n if dimensions == 3:\n tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2] = 1\n elif dimensions == 4:\n tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2, tensor.size(3) // 2] = 1\n else:\n tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2, tensor.size(3) // 2, tensor.size(4) // 2] = 1\n return tensor", + "docstring": "Fill the {3, 4, 5}-dimensional input with the Dirac delta function. Preserves the identity of the inputs in layers, where as many input channels are preserved as possible. 
In case of groups>1, each group of channels preserves identity Args: tensor: a {3, 4, 5}-dimensional groups (int, optional): number of groups in the conv layer (default: 1) Examples: >>> w = torch.empty(3, 16, 5, 5) >>> nn.init.dirac_(w) >>> w = torch.empty(3, 24, 5, 5) >>> nn.init.dirac_(w, 3)", + "type": "function", + "file_path": "pytorch\\torch\\nn\\init.py", + "ast_data": "FunctionDef name:dirac_ arg:tensor arg:groups arguments arg arg Assign Call If Compare Raise Call Assign Call If Compare Raise Call Assign Assign Call With Call Call For Call For Call If Compare Assign Call If Compare Assign Call Call Assign Call Call Call Return return:yes" + }, + { + "library": "kornia", + "name": "compute_metrics", + "source_code": "def compute_metrics(self, *args: Any) -> Dict[str, float]:\n return {}", + "docstring": "Compute metrics during the evaluation.", + "type": "method", + "file_path": "kornia\\kornia\\x\\trainer.py", + "ast_data": "FunctionDef name:compute_metrics arg:self arguments arg arg Return return:no" + }, + { + "library": "tensorflow", + "name": "client_id", + "source_code": "@tf_export('experimental.dtensor.client_id', v1=[])\ndef client_id() -> int:\n client_id_value = int(os.environ.get(_DT_CLIENT_ID, '0'))\n if client_id_value < 0:\n raise ValueError(f'Environment variable {_DT_CLIENT_ID} must be >= 0, got {client_id_value}. ')\n if client_id_value >= num_clients():\n raise ValueError(f'Environment variable {_DT_CLIENT_ID} must be < {num_clients()}, got {client_id_value}')\n return client_id_value", + "docstring": "Returns this client's ID.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py", + "ast_data": "FunctionDef name:client_id arguments Assign Call Call If Compare Raise Call If Compare Call Raise Call Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_channel_flatten_input", + "source_code": "def _channel_flatten_input(x, data_format):\n graph = ops.get_default_graph()\n cache_key = (graph, x.ref(), data_format)\n if cache_key not in _channel_flatten_input_cache:\n x_shape = array_ops.shape(x)\n neg_ones = constant_op.constant([-1], dtype=x_shape.dtype)\n if data_format == b'NCHW':\n order = [1, 0, 2, 3, 4]\n shape = array_ops.concat([x_shape[1:2], neg_ones, x_shape[3:]], axis=0)\n reverse_order = order\n else:\n order = [1, 2, 3, 0, 4]\n shape = array_ops.concat([x_shape[1:4], neg_ones], axis=0)\n reverse_order = [3, 0, 1, 2, 4]\n x = array_ops.transpose(x, order)\n reverse_shape = array_ops.shape(x)\n x = array_ops.reshape(x, shape)\n outputs = (x, reverse_order, reverse_shape)\n _channel_flatten_input_cache[cache_key] = outputs\n else:\n outputs = _channel_flatten_input_cache[cache_key]\n return outputs", + "docstring": "Merge the stack dimension with the channel dimension. If S is pfor's stacking dimension, then, - for SNCHW, we transpose to NSCHW. If N dimension has size 1, the transpose should be cheap. - for SNHWC, we transpose to NHWSC. We then merge the S and C dimension. Args: x: tensor_lib.Tensor to transform. data_format: \"NCHW\" or \"NHWC\". 
Returns: A 3-element tuple with the transformed value, along with the shape for reshape and order for transpose required to transform back.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", + "ast_data": "FunctionDef name:_channel_flatten_input arg:x arg:data_format arguments arg arg Assign Call Assign Call If Compare Assign Call Assign Call If Compare Assign Assign Call Assign Assign Assign Call Assign Assign Call Assign Call Assign Call Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "validate_baseline_data", + "source_code": "def validate_baseline_data(baseline_data_full, config_id, baseline_yaml_file):\n if not baseline_data_full or not isinstance(baseline_data_full, dict):\n print(f'::warning file={baseline_yaml_file}::Baseline YAML is empty or not a dictionary. Skipping comparison.')\n sys.exit(0)\n if config_id not in baseline_data_full:\n print(f\"::notice::No baseline found for config_id '{config_id}' in {baseline_yaml_file}. Skipping comparison.\")\n sys.exit(0)\n config_baselines = baseline_data_full[config_id]\n if not isinstance(config_baselines, dict):\n print(f\"::warning file={baseline_yaml_file},title=Invalid Baseline Structure::Baseline entry for '{config_id}' is not a dictionary. Skipping.\")\n sys.exit(0)\n return config_baselines", + "docstring": "Validates the loaded baseline data and extracts config-specific baselines.", + "type": "function", + "file_path": "tensorflow\\third_party\\xla\\.github\\workflows\\benchmarks\\compare_with_baseline.py", + "ast_data": "FunctionDef name:validate_baseline_data arg:baseline_data_full arg:config_id arg:baseline_yaml_file arguments arg arg arg If BoolOp Call Call Call If Compare Call Call Assign If Call Call Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_flipudlr", + "source_code": "def _flipudlr(array):\n return np.flipud(np.fliplr(array))", + "docstring": "Reverse the rows and columns of an array.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py", + "ast_data": "FunctionDef name:_flipudlr arg:array arguments arg Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "_X_CenterStackOp", + "source_code": "class _X_CenterStackOp(sparse.linalg.LinearOperator):\n\n def __init__(self, X, X_mean, sqrt_sw):\n n_samples, n_features = X.shape\n super().__init__(X.dtype, (n_samples, n_features + 1))\n self.X = X\n self.X_mean = X_mean\n self.sqrt_sw = sqrt_sw\n\n def _matvec(self, v):\n v = v.ravel()\n return safe_sparse_dot(self.X, v[:-1], dense_output=True) - self.sqrt_sw * self.X_mean.dot(v[:-1]) + v[-1] * self.sqrt_sw\n\n def _matmat(self, v):\n return safe_sparse_dot(self.X, v[:-1], dense_output=True) - self.sqrt_sw[:, None] * self.X_mean.dot(v[:-1]) + v[-1] * self.sqrt_sw[:, None]\n\n def _transpose(self):\n return _XT_CenterStackOp(self.X, self.X_mean, self.sqrt_sw)", + "docstring": "Behaves as centered and scaled X with an added intercept column. 
This operator behaves as np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]])", + "type": "class", + "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py", + "ast_data": "ClassDef name:_X_CenterStackOp FunctionDef name:__init__ arg:self arg:X arg:X_mean arg:sqrt_sw arguments arg arg arg arg Assign Call Call Assign Assign Assign FunctionDef name:_matvec arg:self arg:v arguments arg arg Assign Call Return return:yes Call Call FunctionDef name:_matmat arg:self arg:v arguments arg arg Return return:yes Call Call FunctionDef name:_transpose arg:self arguments arg Return return:yes Call" + }, + { + "library": "authlib", + "name": "validate_id_token_encrypted_response_enc", + "source_code": "def validate_id_token_encrypted_response_enc(self):\n if self.get('id_token_encrypted_response_enc') and (not self.get('id_token_encrypted_response_alg')):\n raise InvalidClaimError('id_token_encrypted_response_enc')\n if self.get('id_token_encrypted_response_alg'):\n self.setdefault('id_token_encrypted_response_enc', 'A128CBC-HS256')\n self._validate_claim_value('id_token_encrypted_response_enc')", + "docstring": "JWE enc algorithm [JWA] REQUIRED for encrypting the ID Token issued to this Client. If id_token_encrypted_response_alg is specified, the default id_token_encrypted_response_enc value is A128CBC-HS256. When id_token_encrypted_response_enc is included, id_token_encrypted_response_alg MUST also be provided.", + "type": "method", + "file_path": "authlib\\authlib\\oidc\\registration\\claims.py", + "ast_data": "FunctionDef name:validate_id_token_encrypted_response_enc arg:self arguments arg If BoolOp Call Call Raise Call If Call Call Call" + }, + { + "library": "tensorflow", + "name": "variable_shape", + "source_code": "@abc.abstractproperty\ndef variable_shape(self):\n pass", + "docstring": "of , without batch dimension.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:variable_shape arg:self arguments arg" + }, + { + "library": "scipy", + "name": "boxcox", + "source_code": "def boxcox(x, lmbda=None, alpha=None, optimizer=None):\n x = np.asarray(x)\n if lmbda is not None:\n return special.boxcox(x, lmbda)\n if x.ndim != 1:\n raise ValueError('Data must be 1-dimensional.')\n if x.size == 0:\n return x\n if np.all(x == x[0]):\n raise ValueError('Data must not be constant.')\n if np.any(x <= 0):\n raise ValueError('Data must be positive.')\n lmax = boxcox_normmax(x, method='mle', optimizer=optimizer)\n y = boxcox(x, lmax)\n if alpha is None:\n return (y, lmax)\n else:\n interval = _boxcox_conf_interval(x, lmax, alpha)\n return (y, lmax, interval)", + "docstring": "Return a dataset transformed by a Box-Cox power transformation. Parameters ---------- x : ndarray Input array to be transformed. If is not None, this is an alias of . 
Returns nan if `boxcox` to transform the data so it's closest to normal: >>> ax2 = fig.add_subplot(212) >>> xt, _ = stats.boxcox(x) >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2) >>> ax2.set_title('Probplot after Box-Cox transformation') >>> plt.show()", + "type": "function", + "file_path": "scipy\\scipy\\stats\\_morestats.py", + "ast_data": "FunctionDef name:boxcox arg:x arg:lmbda arg:alpha arg:optimizer arguments arg arg arg arg Assign Call If Compare Return return:yes Call If Compare Raise Call If Compare Return return:yes If Call Compare Raise Call If Call Compare Raise Call Assign Call Assign Call If Compare Return return:yes Assign Call Return return:yes" + }, + { + "library": "django", + "name": "latest_post_date", + "source_code": "def latest_post_date(self):\n latest_date = None\n date_keys = ('updateddate', 'pubdate')\n for item in self.items:\n for date_key in date_keys:\n item_date = item.get(date_key)\n if item_date:\n if latest_date is None or item_date > latest_date:\n latest_date = item_date\n return latest_date or datetime.datetime.now(tz=datetime.UTC)", + "docstring": "Return the latest item's pubdate or updateddate. If no items have either of these attributes this return the current UTC date/time.", + "type": "method", + "file_path": "django\\django\\utils\\feedgenerator.py", + "ast_data": "FunctionDef name:latest_post_date arg:self arguments arg Assign Assign For For Assign Call If If BoolOp Compare Compare Assign Return return:yes BoolOp Call" + }, + { + "library": "django", + "name": "_checkdim", + "source_code": "def _checkdim(self, dim):\n if dim < 0 or dim > 2:\n raise GEOSException('invalid ordinate dimension \"%d\"' % dim)", + "docstring": "Check the given dimension.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py", + "ast_data": "FunctionDef name:_checkdim arg:self arg:dim arguments arg arg If BoolOp Compare Compare Raise Call" + }, + { + "library": "scipy", + "name": "aps09_f", + "source_code": "def aps09_f(x, n):\n return (1 + (1 - n) ** 4) * x - (1 - n * x) ** 4", + "docstring": "Upside down quartic with parametrizable height", + "type": "function", + "file_path": "scipy\\scipy\\optimize\\_tstutils.py", + "ast_data": "FunctionDef name:aps09_f arg:x arg:n arguments arg arg Return return:yes" + }, + { + "library": "sphinx", + "name": "iscoroutinefunction", + "source_code": "def iscoroutinefunction(obj: Any) -> TypeIs[Callable[..., types.CoroutineType[Any, Any, Any]]]:\n obj = unwrap_all(obj, stop=_is_wrapped_coroutine)\n return inspect.iscoroutinefunction(obj)", + "docstring": "Check if the object is a :external+python:term: function.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\inspect.py", + "ast_data": "FunctionDef name:iscoroutinefunction arg:obj arguments arg Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "transform_subclass", + "source_code": "def transform_subclass(t, callback, outer_size=None, outer_stride=None):\n outer_size = outer_size if outer_size is not None else t.size()\n outer_stride = outer_stride if outer_stride is not None else t.stride()\n attrs, ctx = t.__tensor_flatten__()\n transformed_tensors_dict = {}\n for attr in attrs:\n transformed_tensors_dict[attr] = callback(attr, getattr(t, attr))\n sub = type(t).__tensor_unflatten__(transformed_tensors_dict, ctx, outer_size, outer_stride)\n assert sub.shape == outer_size, f'Expected return value from {type(t)}__tensor_unflatten__() to have shape equal to {outer_size}, but got: {sub.shape}'\n assert 
sub.stride() == outer_stride, f'Expected return value from {type(t)}__tensor_unflatten__() to have stride equal to {outer_stride}, but got: {sub.stride()}'\n return sub", + "docstring": "Given a traceable, wrapper tensor subclass `transform_subclass` to get a transformed tensor, and putting each transformed tensor into the fresh tensor subclass instance. Note: this function will not handle ensuring that the fresh subclass gets the same (autograd, and aliasing) metadata as the original tensor. This is generally handled in other subsystems like AOTAutograd.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\_python_dispatch.py", + "ast_data": "FunctionDef name:transform_subclass arg:t arg:callback arg:outer_size arg:outer_stride arguments arg arg arg arg Assign Compare Call Assign Compare Call Assign Call Assign For Assign Call Call Assign Call Call Compare Call Compare Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, **kwargs):\n if len(kwargs) != len(self._inputs):\n raise ValueError('Invalid number of inputs provided for running a SignatureDef, expected %s vs provided %s' % (len(self._inputs), len(kwargs)))\n for input_name, value in kwargs.items():\n if input_name not in self._inputs:\n raise ValueError('Invalid Input name (%s) for SignatureDef' % input_name)\n self._interpreter_wrapper.ResizeInputTensor(self._inputs[input_name], np.array(value.shape, dtype=np.int32), False, self._subgraph_index)\n self._interpreter_wrapper.AllocateTensors(self._subgraph_index)\n for input_name, value in kwargs.items():\n self._interpreter_wrapper.SetTensor(self._inputs[input_name], value, self._subgraph_index)\n self._interpreter_wrapper.Invoke(self._subgraph_index)\n result = {}\n for output_name, output_index in self._outputs:\n result[output_name] = self._interpreter_wrapper.GetTensor(output_index, self._subgraph_index)\n return result", + "docstring": "Runs the SignatureDef given the provided inputs in arguments. Args: **kwargs: key,value for inputs to the model. Key is the SignatureDef input name. Value is numpy array with the value. Returns: dictionary of the results from the model invoke. Key in the dictionary is SignatureDef output name. 
Value is the result Tensor.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py", + "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg If Compare Call Call Raise Call Call Call For Call If Compare Raise Call Call Call Call For Call Call Call Assign For Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_edge_or_node_to_qspec", + "source_code": "def _get_edge_or_node_to_qspec(model: torch.fx.GraphModule) -> dict[EdgeOrNode, QuantizationSpecBase]:\n edge_or_node_to_qspec: dict[EdgeOrNode, QuantizationSpecBase] = {}\n for n in model.graph.nodes:\n if hasattr(n, 'meta') and 'quantization_annotation' in n.meta:\n qa = n.meta['quantization_annotation']\n for input_to_n, qspec in qa.input_qspec_map.items():\n input_edge = (input_to_n, n)\n edge_or_node_to_qspec[input_edge] = qspec\n if qa.output_qspec is not None:\n output_node = n\n qspec = qa.output_qspec\n edge_or_node_to_qspec[output_node] = qspec\n return edge_or_node_to_qspec", + "docstring": "Get a map from EdgeOrNode to quantization spec based on annotations on the nodes", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\prepare.py", + "ast_data": "FunctionDef name:_get_edge_or_node_to_qspec arg:model arguments arg For If BoolOp Call Compare Assign For Call Assign Assign If Compare Assign Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "__init__", + "source_code": "def __init__(self, checkpointer_impl, root=None, **kwargs):\n if root:\n trackable_root = root() if isinstance(root, weakref.ref) else root\n kwargs['root'] = trackable_root\n trackable_root._maybe_initialize_trackable()\n if checkpointer_impl is None:\n raise AttributeError('checkpointer_impl cannot be None for AsyncCheckpointHelper.')\n self._checkpointer_impl = checkpointer_impl\n self._checkpoint_items = kwargs\n self._checkpoint = None\n self.checkpointer()\n self._checkpoint_options = None\n self._initialized = False\n self._original_nodes = None\n self._object_map = None\n self._tpu_embedding_objects = None\n self._saveable_trackables = None\n self._default_device = device_util.current() or 'CPU:0'\n self._default_device = device_util.canonicalize(self._default_device)\n self._save_file_prefix = None\n self._use_checkpoint_save = False\n self._async_save_thread = None\n self._queue = queue.Queue(maxsize=1)\n atexit.register(self._join_async_save_thread)\n self._async_error = None\n global _END_TIME_OF_LAST_ASYNC_WRITE\n with _END_TIME_OF_LAST_ASYNC_WRITE_LOCK:\n if _END_TIME_OF_LAST_ASYNC_WRITE is None:\n _END_TIME_OF_LAST_ASYNC_WRITE = time.time()", + "docstring": "Initialize AsyncCheckpoint. Args: checkpointer_impl: The Checkpoint class to power the AsyncCheckpoint. root: The root object to checkpoint. may be a trackable object or of a trackable object. **kwargs: The keyword arguments representing the checkpointed variables. 
Raises: AttributeError: when checkpointer_impl is None.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:checkpointer_impl arg:root arguments arg arg arg arg If Assign Call Call Assign Call If Compare Raise Call Assign Assign Assign Call Assign Assign Assign Assign Assign Assign Assign BoolOp Call Assign Call Assign Assign Assign Assign Call Call Assign With If Compare Assign Call" + }, + { + "library": "django", + "name": "is_password_usable", + "source_code": "def is_password_usable(encoded):\n return encoded is None or not encoded.startswith(UNUSABLE_PASSWORD_PREFIX)", + "docstring": "Return True if this password wasn't generated by User.set_unusable_password(), i.e. make_password(None).", + "type": "function", + "file_path": "django\\django\\contrib\\auth\\hashers.py", + "ast_data": "FunctionDef name:is_password_usable arg:encoded arguments arg Return return:yes BoolOp Compare Call" + }, + { + "library": "pytorch", + "name": "_create_placement_strategy", + "source_code": "def _create_placement_strategy(node: Node, mesh: DeviceMesh, placements: tuple[Placement, ...], input_specs: Optional[Sequence[DTensorSpec]]=None) -> PlacementStrategy:\n placement = PlacementStrategy(input_specs=input_specs, output_specs=DTensorSpec(mesh=mesh, placements=placements))\n _populate_tensor_meta(node, placement.output_specs)\n return placement", + "docstring": "Util function to construct a placement strategy for a given node.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_tp_transform.py", + "ast_data": "FunctionDef name:_create_placement_strategy arg:node arg:mesh arg:placements arg:input_specs arguments arg arg arg arg Assign Call Call Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "ControlFlowFuncGraph", + "source_code": "class ControlFlowFuncGraph(func_graph.FuncGraph):\n\n def __init__(self, *args, **kwargs):\n super(ControlFlowFuncGraph, self).__init__(*args, **kwargs)\n outer_graph = self.outer_graph\n self._device_function_stack = outer_graph._device_function_stack.copy()\n self.is_control_flow_graph = True\n if ops.executing_eagerly_outside_functions():\n func_graph.override_func_graph_name_scope(self, self.outer_graph.get_name_scope())", + "docstring": "Contains control flow-specific FuncGraph logic.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_v2_func_graphs.py", + "ast_data": "ClassDef name:ControlFlowFuncGraph FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Assign Call Assign If Call Call Call" + }, + { + "library": "scipy", + "name": "Exp2", + "source_code": "class Exp2(Benchmark):\n\n def __init__(self, dimensions=2):\n Benchmark.__init__(self, dimensions)\n self._bounds = list(zip([0.0] * self.N, [20.0] * self.N))\n self.custom_bounds = [(0, 2), (0, 20)]\n self.global_optimum = [[1.0, 10.0]]\n self.fglob = 0.0\n\n def fun(self, x, *args):\n self.nfev += 1\n i = arange(10.0)\n vec = (exp(-i * x[0] / 10.0) - 5 * exp(-i * x[1] / 10.0) - exp(-i / 10.0) + 5 * exp(-i)) ** 2\n return sum(vec)", + "docstring": "Exp2 objective function. This class defines the Exp2 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Exp2}}(x) = \\sum_{i=0}^9 \\left ( e^{-ix_1/10} - 5e^{-ix_2/10} - e^{-i/10} + 5e^{-i} \\right )^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. 
& Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", + "type": "class", + "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_E.py", + "ast_data": "ClassDef name:Exp2 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "unshard_tensor_dim", + "source_code": "def unshard_tensor_dim(placements: Sequence[Placement], dim: int) -> tuple[Placement, ...]:\n return tuple((p if not isinstance(p, Shard) or p.dim != dim else Replicate() for p in placements))", + "docstring": "Disallow the given tensor dimension to be sharded.", + "type": "function", + "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_tensor_ops.py", + "ast_data": "FunctionDef name:unshard_tensor_dim arg:placements arg:dim arguments arg arg Return return:yes Call BoolOp Call Compare Call" + }, + { + "library": "tensorflow", + "name": "_tile_ragged_values", + "source_code": "def _tile_ragged_values(rt_input, multiples, const_multiples=None):\n ragged_rank = rt_input.ragged_rank\n nested_splits = rt_input.nested_row_splits\n inner_value_ids = math_ops.range(nested_splits[-1][-1])\n prev_splits = None\n for axis in range(ragged_rank, 0, -1):\n splits = nested_splits[axis - 1]\n if prev_splits is not None:\n splits = array_ops.gather(prev_splits * multiples[axis + 1], splits)\n if const_multiples is None or const_multiples[axis] != 1:\n inner_value_ids = ragged_util.repeat_ranges(inner_value_ids, splits, multiples[axis])\n prev_splits = splits\n ragged_tiled_values = array_ops.gather(rt_input.flat_values, inner_value_ids)\n inner_repeats = array_ops.concat([multiples[:1], multiples[ragged_rank + 1:]], axis=0)\n return array_ops.tile(ragged_tiled_values, inner_repeats)", + "docstring": "Builds flat_values tensor for a tiled . Returns a tensor that repeats the values in in the appropriate pattern to construct a that tiles as specified by . Args: rt_input: The whose values should be repeated. multiples: A 1-D integer , indicating how many times each dimension should be repeated. const_multiples: Optional constant value for multiples. Used to skip tiling dimensions where . Returns: A with the same type and rank as . #### Example: >>> rt = tf.ragged.constant([[1, 2], [3]]) >>> _tile_ragged_values(rt, tf.constant([3, 2])).numpy() array([1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3], dtype=int32)", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py", + "ast_data": "FunctionDef name:_tile_ragged_values arg:rt_input arg:multiples arg:const_multiples arguments arg arg arg Assign Assign Assign Call Assign For Call Assign If Compare Assign Call If BoolOp Compare Compare Assign Call Assign Assign Call Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_atleast_nd", + "source_code": "def _atleast_nd(n, new_shape, *arys):\n\n def f(x):\n x = asarray(x)\n return asarray(np_utils.cond(np_utils.greater(n, array_ops.rank(x)), lambda: reshape(x, new_shape(n, array_ops.shape(x))), lambda: x))\n arys = list(map(f, arys))\n if len(arys) == 1:\n return arys[0]\n else:\n return arys", + "docstring": "Reshape arrays to be at least -dimensional. Args: n: The minimal rank. 
new_shape: a function that takes and the old shape and returns the desired new shape. *arys: ndarray(s) to be reshaped. Returns: The reshaped array(s).", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py", + "ast_data": "FunctionDef name:_atleast_nd arg:n arg:new_shape arguments arg arg arg FunctionDef name:f arg:x arguments arg Assign Call Return return:yes Call Call Call Call arguments Call Call Call arguments Assign Call Call If Compare Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "PerWorkerDatasetFromDataset", + "source_code": "class PerWorkerDatasetFromDataset(PerWorkerDatasetFromDatasetFunction):\n\n def __init__(self, dataset, coordinator):\n if isinstance(dataset, input_lib.DistributedDataset):\n original_dataset = dataset._original_dataset\n serialized = serialize_dataset_to_graph(original_dataset)\n\n def dataset_fn():\n deserialized = deserialize_dataset_from_graph(serialized, original_dataset.element_spec)\n dataset.build(dataset_to_replace=deserialized)\n return dataset\n elif isinstance(dataset, input_lib.DistributedDatasetsFromFunction):\n\n def dataset_fn():\n dataset.build()\n return dataset\n elif isinstance(dataset, dataset_ops.Dataset):\n serialized = serialize_dataset_to_graph(dataset)\n\n def dataset_fn():\n return deserialize_dataset_from_graph(serialized, dataset.element_spec)\n else:\n raise ValueError('Unexpected dataset type!')\n super(PerWorkerDatasetFromDataset, self).__init__(dataset_fn, coordinator)", + "docstring": "Represents worker-distributed datasets created from a dataset.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py", + "ast_data": "ClassDef name:PerWorkerDatasetFromDataset FunctionDef name:__init__ arg:self arg:dataset arg:coordinator arguments arg arg arg If Call Assign Assign Call FunctionDef name:dataset_fn arguments Assign Call Call Return return:yes If Call FunctionDef name:dataset_fn arguments Call Return return:yes If Call Assign Call FunctionDef name:dataset_fn arguments Return return:yes Call Raise Call Call Call" + }, + { + "library": "django", + "name": "lat_lon", + "source_code": "def lat_lon(self, query):\n data = self.city(query)\n return (data['latitude'], data['longitude'])", + "docstring": "Return a tuple of the (latitude, longitude) for the given query.", + "type": "method", + "file_path": "django\\django\\contrib\\gis\\geoip2.py", + "ast_data": "FunctionDef name:lat_lon arg:self arg:query arguments arg arg Assign Call Return return:yes" + }, + { + "library": "pandas", + "name": "transpose_homogeneous_pyarrow", + "source_code": "def transpose_homogeneous_pyarrow(arrays: Sequence[ArrowExtensionArray]) -> list[ArrowExtensionArray]:\n arrays = list(arrays)\n nrows, ncols = (len(arrays[0]), len(arrays))\n indices = np.arange(nrows * ncols).reshape(ncols, nrows).T.reshape(-1)\n arr = pa.chunked_array([chunk for arr in arrays for chunk in arr._pa_array.chunks])\n arr = arr.take(indices)\n return [ArrowExtensionArray(arr.slice(i * ncols, ncols)) for i in range(nrows)]", + "docstring": "Transpose arrow extension arrays in a list, but faster. Input should be a list of arrays of equal length and all have the same dtype. 
The caller is responsible for ensuring validity of input data.", + "type": "function", + "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py", + "ast_data": "FunctionDef name:transpose_homogeneous_pyarrow arg:arrays arguments arg Assign Call Assign Call Call Assign Call Call Call Assign Call Assign Call Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "get_metadata_routing", + "source_code": "def get_metadata_routing(self):\n router = MetadataRouter(owner=self.__class__.__name__)\n router.add(estimator=self.estimator, method_mapping=MethodMapping().add(caller='fit', callee='fit'))\n router.add(splitter=check_cv(self.cv, classifier=is_classifier(self.estimator)), method_mapping=MethodMapping().add(caller='fit', callee='split'))\n router.add(scorer=check_scoring(self.estimator, scoring=self.scoring), method_mapping=MethodMapping().add(caller='fit', callee='score'))\n return router", + "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.6 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\feature_selection\\_sequential.py", + "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Call Call Call Call Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "get_op_profiles", + "source_code": "def get_op_profiles(gm: torch.fx.GraphModule, ops_to_guard: set[str]) -> dict[str, set[OpProfile]]:\n\n def _get_op_profile(node: torch.fx.Node) -> OpProfile:\n args_profile = tuple([TensorMetadata.maybe_from_tensor(arg.meta.get('val')) if isinstance(arg, torch.fx.Node) else None for arg in (*node.args, *node.kwargs.values())])\n out_profile = None\n meta = node.meta.get('val')\n assert meta is not None\n if isinstance(meta, torch.Tensor):\n out_profile = TensorMetadata.maybe_from_tensor(meta)\n elif isinstance(meta, (list, tuple)):\n out_profile = tuple([TensorMetadata.maybe_from_tensor(m) for m in meta])\n assert out_profile is not None\n return OpProfile(args_profile, out_profile)\n op_profiles: dict[str, set[OpProfile]] = defaultdict(set)\n for node in gm.graph.nodes:\n if node.op == 'call_function' and str(node.target) in ops_to_guard:\n op_profiles[str(node.target)].add(_get_op_profile(node))\n return op_profiles", + "docstring": "This is used by draft_export to get a list of custom operator profiles so that we can generate fake kernels.", + "type": "function", + "file_path": "pytorch\\torch\\_export\\passes\\insert_custom_op_guards.py", + "ast_data": "FunctionDef name:get_op_profiles arg:gm arg:ops_to_guard arguments arg arg FunctionDef name:_get_op_profile arg:node arguments arg Assign Call Call Call Call Call Assign Assign Call Compare If Call Assign Call If Call Assign Call Call Compare Return return:yes Call Call For If BoolOp Compare Compare Call Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "worker_name", + "source_code": "def worker_name(self) -> Optional[str]:\n return self._worker_name", + "docstring": "Return the name of remote worker representing the remote device and `` if no worker name is available.", + "type": "method", + "file_path": "pytorch\\torch\\distributed\\remote_device.py", + "ast_data": "FunctionDef name:worker_name arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "set_window_title", + "source_code": "def 
set_window_title(self, title):\n self._window_title = title", + "docstring": "Set the title text of the window containing the figure. Examples -------- >>> fig = plt.figure() >>> fig.canvas.manager.set_window_title('My figure')", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", + "ast_data": "FunctionDef name:set_window_title arg:self arg:title arguments arg arg Assign" + }, + { + "library": "tensorflow", + "name": "map_structure_with_tuple_paths_up_to", + "source_code": "def map_structure_with_tuple_paths_up_to(shallow_tree, func, *inputs, **kwargs):\n return nest_util.map_structure_up_to(nest_util.Modality.CORE, shallow_tree, func, *inputs, **kwargs)", + "docstring": "Applies a function or op to a number of partially flattened inputs. Like map_structure_up_to(), except that the 'func' argument takes a path tuple as its first argument, followed by the corresponding values from *inputs. Example: Args: shallow_tree: a shallow structure, common to all the inputs. func: callable that takes args (path, inputs_0_value, ... , inputs_N_value), where path is a tuple path to an atom in shallow_tree, and inputs_i_value is the corresponding value from inputs[i]. *inputs: structures that are all structurally compatible with shallow_tree. **kwargs: kwargs to feed to func(). Special kwarg is not passed to func, but instead determines whether the types of iterables within the structures have to be same (e.g. raises a exception). To allow this set this argument to . Raises: TypeError: If is a nested structure but one of is not. TypeError: If the structure types of are different from . ValueError: If the structure lengths of are different from . Returns: Result of repeatedly applying . Has the same structure layout as .", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py", + "ast_data": "FunctionDef name:map_structure_with_tuple_paths_up_to arg:shallow_tree arg:func arguments arg arg arg arg Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "convert_tensor_tf_type_to_tflite_type", + "source_code": "def convert_tensor_tf_type_to_tflite_type(tf_type: dtypes.DType, usage: str='') -> _types_pb2.IODataType:\n mapping = {dtypes.float16: _types_pb2.FLOAT16, dtypes.float32: _types_pb2.FLOAT, dtypes.float64: _types_pb2.FLOAT64, dtypes.int8: _types_pb2.INT8, dtypes.int16: _types_pb2.INT16, dtypes.uint16: _types_pb2.UINT16, dtypes.int32: _types_pb2.INT32, dtypes.int64: _types_pb2.INT64, dtypes.uint8: _types_pb2.UINT8, dtypes.uint32: _types_pb2.UINT32, dtypes.uint64: _types_pb2.UINT64, dtypes.string: _types_pb2.STRING, dtypes.bool: _types_pb2.BOOL, dtypes.complex64: _types_pb2.COMPLEX64, dtypes.complex128: _types_pb2.COMPLEX128}\n tflite_type = mapping.get(tf_type)\n if tflite_type is None:\n raise ValueError('Unsupported TensorFlow type `{0}` provided for the {1}'.format(tf_type, usage))\n return tflite_type", + "docstring": "Convert tensor type from tf type to tflite type. Args: tf_type: TensorFlow type. usage: Text describing the reason for invoking this function. Raises: ValueError: If is unsupported. Returns: tflite_type: TFLite type. 
Refer to compiler/mlir/lite/types.proto.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py", + "ast_data": "FunctionDef name:convert_tensor_tf_type_to_tflite_type arg:tf_type arg:usage arguments arg arg Assign Assign Call If Compare Raise Call Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "default_units", + "source_code": "@staticmethod\ndef default_units(data, axis):\n if axis.units is None:\n axis.set_units(UnitData(data))\n else:\n axis.units.update(data)\n return axis.units", + "docstring": "Set and update the units. Parameters ---------- data : str or iterable of str axis : axis on which the data is plotted Returns ------- object storing string to integer mapping", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\category.py", + "ast_data": "FunctionDef name:default_units arg:data arg:axis arguments arg arg If Compare Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "add_child", + "source_code": "def add_child(self, function_id: FunctionID, node: CUDAGraphNode) -> None:\n self.children[function_id].append(node)", + "docstring": "Adds node as a a child of self", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", + "ast_data": "FunctionDef name:add_child arg:self arg:function_id arg:node arguments arg arg arg Call" + }, + { + "library": "sphinx", + "name": "toc_metadata", + "source_code": "def toc_metadata(self, level: int, navpoints: list[NavPoint]) -> dict[str, Any]:\n metadata: dict[str, Any] = {'uid': self.config.epub_uid, 'title': html.escape(self.config.epub_title), 'level': level, 'navpoints': navpoints}\n return metadata", + "docstring": "Create a dictionary with all metadata for the toc.ncx file properly escaped.", + "type": "method", + "file_path": "sphinx\\sphinx\\builders\\_epub_base.py", + "ast_data": "FunctionDef name:toc_metadata arg:self arg:level arg:navpoints arguments arg arg arg Call Return return:yes" + }, + { + "library": "kornia", + "name": "contrast", + "source_code": "def contrast(probability: float, magnitude: int) -> OperationBase:\n magnitudes = linspace(0.1, 1.9, 11)\n return Contrast(None, probability, magnitude_range=(magnitudes[magnitude].item(), magnitudes[magnitude + 1].item()))", + "docstring": "Return contrast op.", + "type": "function", + "file_path": "kornia\\kornia\\augmentation\\auto\\autoaugment\\ops.py", + "ast_data": "FunctionDef name:contrast arg:probability arg:magnitude arguments arg arg Assign Call Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "get_module", + "source_code": "def get_module(dir_path: str, relative_to_dir: str) -> str:\n dir_path = dir_path[len(relative_to_dir):]\n dir_path = dir_path.replace(os.sep, '/')\n return dir_path.replace('/', '.').strip('.')", + "docstring": "Get module that corresponds to path relative to relative_to_dir. Args: dir_path: Path to directory. relative_to_dir: Get module relative to this directory. 
Returns: Name of module that corresponds to the given directory.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\generator\\generator.py", + "ast_data": "FunctionDef name:get_module arg:dir_path arg:relative_to_dir arguments arg arg Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "named_modules", + "source_code": "def named_modules(self, memo: Optional[set['Module']]=None, prefix: str='', remove_duplicate: bool=True):\n if memo is None:\n memo = set()\n if self not in memo:\n if remove_duplicate:\n memo.add(self)\n yield (prefix, self)\n for name, module in self._modules.items():\n if module is None:\n continue\n submodule_prefix = prefix + ('.' if prefix else '') + name\n yield from module.named_modules(memo, submodule_prefix, remove_duplicate)", + "docstring": "Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself. Args: memo: a memo to store the set of modules already added to the result prefix: a prefix that will be added to the name of the module remove_duplicate: whether to remove the duplicated module instances in the result or not Yields: (str, Module): Tuple of name and module Note: Duplicate modules are returned only once. In the following example, `` will be returned only once. Example:: >>> l = nn.Linear(2, 2) >>> net = nn.Sequential(l, l) >>> for idx, m in enumerate(net.named_modules()): ... print(idx, '->', m) 0 -> ('', Sequential( (0): Linear(in_features=2, out_features=2, bias=True) (1): Linear(in_features=2, out_features=2, bias=True) )) 1 -> ('0', Linear(in_features=2, out_features=2, bias=True))", + "type": "method", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:named_modules arg:self arg:memo arg:prefix arg:remove_duplicate arguments arg arg arg arg If Compare Assign Call If Compare If Call For Call If Compare Assign Call" + }, + { + "library": "tensorflow", + "name": "_is_statically_shaped", + "source_code": "def _is_statically_shaped(element_spec):\n for spec in nest.flatten(element_spec):\n if isinstance(spec, (sparse_tensor.SparseTensorSpec, ragged_tensor.RaggedTensorSpec)):\n if spec.shape.rank > 0 and spec.shape.as_list()[0] is None:\n return False\n else:\n for component in spec._flat_tensor_specs:\n if not component.shape.is_fully_defined():\n return False\n return True", + "docstring": "Test if an iterator output is statically shaped. For sparse and ragged tensors this only tests the batch dimension. Args: element_spec: a nest structure of . The element spec of the dataset of the iterator. 
Returns: True if the shape is static, false otherwise.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", + "ast_data": "FunctionDef name:_is_statically_shaped arg:element_spec arguments arg For Call If Call If BoolOp Compare Compare Call Return return:yes For If Call Return return:yes Return return:yes" + }, + { + "library": "tensorflow", + "name": "scan", + "source_code": "def scan():\n if _SMCLI_TAG_SET.value and _SMCLI_OP_DENYLIST.value:\n scan_meta_graph_def(saved_model_utils.get_meta_graph_def(_SMCLI_DIR.value, _SMCLI_TAG_SET.value), _get_op_denylist_set(_SMCLI_OP_DENYLIST.value))\n elif _SMCLI_TAG_SET.value:\n scan_meta_graph_def(saved_model_utils.get_meta_graph_def(_SMCLI_DIR.value, _SMCLI_TAG_SET.value), _OP_DENYLIST)\n else:\n saved_model = saved_model_utils.read_saved_model(_SMCLI_DIR.value)\n if _SMCLI_OP_DENYLIST.value:\n for meta_graph_def in saved_model.meta_graphs:\n scan_meta_graph_def(meta_graph_def, _get_op_denylist_set(_SMCLI_OP_DENYLIST.value))\n else:\n for meta_graph_def in saved_model.meta_graphs:\n scan_meta_graph_def(meta_graph_def, _OP_DENYLIST)", + "docstring": "Function triggered by scan command.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py", + "ast_data": "FunctionDef name:scan arguments If BoolOp Call Call Call If Call Call Assign Call If For Call Call For Call" + }, + { + "library": "pandas", + "name": "axes", + "source_code": "@property\ndef axes(self) -> list[Index]:\n return [self.index]", + "docstring": "Return a list of the row axis labels.", + "type": "method", + "file_path": "pandas\\pandas\\core\\series.py", + "ast_data": "FunctionDef name:axes arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "_get_updated_graph_signature", + "source_code": "def _get_updated_graph_signature(old_signature: ExportGraphSignature, new_gm: torch.fx.GraphModule) -> ExportGraphSignature:\n new_input_specs = []\n for i, node in enumerate(new_gm.graph.nodes):\n if node.op != 'placeholder':\n break\n assert i < len(old_signature.input_specs), 'Number of inputs changed after transformation'\n old_input_spec = old_signature.input_specs[i]\n arg = old_input_spec.arg if isinstance(old_input_spec.arg, (ConstantArgument, CustomObjArgument)) else type(old_input_spec.arg)(node.name)\n new_input_specs.append(InputSpec(old_input_spec.kind, arg, old_input_spec.target, old_input_spec.persistent))\n output_node = list(new_gm.graph.nodes)[-1]\n assert output_node.op == 'output'\n new_output_specs = []\n for i, node in enumerate(output_node.args[0]):\n assert i < len(old_signature.output_specs), 'Number of outputs changed after transformation'\n old_output_spec = old_signature.output_specs[i]\n arg = old_output_spec.arg if isinstance(old_output_spec.arg, (ConstantArgument, CustomObjArgument)) else type(old_output_spec.arg)(node.name)\n new_output_specs.append(OutputSpec(old_output_spec.kind, arg, old_output_spec.target))\n new_signature = ExportGraphSignature(input_specs=new_input_specs, output_specs=new_output_specs)\n return new_signature", + "docstring": "Update the graph signature's user_input/user_outputs.", + "type": "method", + "file_path": "pytorch\\torch\\export\\exported_program.py", + "ast_data": "FunctionDef name:_get_updated_graph_signature arg:old_signature arg:new_gm arguments arg arg Assign For Call If Compare Compare Call Assign Assign Call Call Call Call Call Assign Call Compare Assign For Call Compare Call Assign Assign Call Call Call 
Call Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_EagerPyFuncGrad", + "source_code": "@ops.RegisterGradient('EagerPyFunc')\ndef _EagerPyFuncGrad(op, *dy):\n token = op.get_attr('token')\n\n def eagerly_executed_grad(*dy):\n tape, eager_inputs, eager_outputs = tape_cache.pop(compat.as_bytes(token))\n return tape.gradient(eager_outputs, eager_inputs, output_gradients=dy)\n with ops.control_dependencies(op.outputs):\n gradient_op = _internal_py_func(func=eagerly_executed_grad, inp=dy, Tout=[tensor.dtype for tensor in op.inputs], use_eager_py_func=True, is_grad_func=True)\n if not context.executing_eagerly():\n func = _py_funcs.get(token.decode())\n assert isinstance(func, EagerFunc), f'EagerPyFuncGrad called on a non-EagerFunc object: {func}.'\n func.set_support_graph_mode_gradient()\n return gradient_op", + "docstring": "Computes the gradient of an EagerPyFunc.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py", + "ast_data": "FunctionDef name:_EagerPyFuncGrad arg:op arguments arg arg Assign Call FunctionDef name:eagerly_executed_grad arguments arg Assign Call Call Return return:yes Call With Call Assign Call If Call Assign Call Call Call Call Return return:yes Call" + }, + { + "library": "numpy", + "name": "add_define_macros", + "source_code": "def add_define_macros(self, macros):\n dist = self.get_distribution()\n if dist is not None:\n if not hasattr(dist, 'define_macros'):\n dist.define_macros = []\n dist.define_macros.extend(macros)\n else:\n self.define_macros.extend(macros)", + "docstring": "Add define macros to configuration Add the given sequence of macro name and value duples to the beginning of the define_macros list This list will be visible to all extension modules of the current package.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\misc_util.py", + "ast_data": "FunctionDef name:add_define_macros arg:self arg:macros arguments arg arg Assign Call If Compare If Call Assign Call Call" + }, + { + "library": "tensorflow", + "name": "broadcast_recv", + "source_code": "def broadcast_recv(shape, dtype, group_size, group_key, instance_key, communication_hint='auto', timeout=0):\n if group_size <= 1:\n raise ValueError(f'Parameter `group_size` to broadcast_send must be at least 2. Received: {group_size}.')\n return gen_collective_ops.collective_bcast_recv(shape=shape, T=dtype, group_size=group_size, group_key=group_key, instance_key=instance_key, communication_hint=communication_hint.lower(), timeout_seconds=timeout)", + "docstring": "Receives a broadcasts tensor, across devices. Args: shape: Shape of the tensor to be received. dtype: Type of the tensor to be received. group_size: one plus the number of receiving tensors, i.e. the total number of devices participating. Each tensor must reside on a different device. group_key: an integer identifying the group of devices. instance_key: an integer identifying the participating group of Ops. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include , , and . timeout: If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. Returns: An Op implementing the broadcast receive. 
Raises: ValueError: if any of the input parameter constraints are not met.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py", + "ast_data": "FunctionDef name:broadcast_recv arg:shape arg:dtype arg:group_size arg:group_key arg:instance_key arg:communication_hint arg:timeout arguments arg arg arg arg arg arg arg If Compare Raise Call Return return:yes Call Call" + }, + { + "library": "scikit-learn", + "name": "predict", + "source_code": "def predict(self, X):\n y = (self.decision_function(X) >= 0).astype(np.int32)\n y[y == 0] = -1\n return y", + "docstring": "Return labels (1 inlier, -1 outlier) of the samples. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Testing data. Returns ------- y : array, shape (n_samples,) Labels of the samples.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py", + "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Compare Call Assign Compare Return return:yes" + }, + { + "library": "tensorflow", + "name": "WhileBodyFuncGraph", + "source_code": "class WhileBodyFuncGraph(ControlFlowFuncGraph):\n pass", + "docstring": "FuncGraph for the body of tf.while_loop(). This is used to distinguish while bodies from other functions.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_v2_func_graphs.py", + "ast_data": "ClassDef name:WhileBodyFuncGraph" + }, + { + "library": "tensorflow", + "name": "y_key", + "source_code": "@property\ndef y_key(self):\n return (object_identity.Reference(self.y),) + self._deep_tuple(tuple(sorted(self.kwargs.items())))", + "docstring": "Returns key used for caching X=g^{-1}(Y).", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py", + "ast_data": "FunctionDef name:y_key arg:self arguments arg Return return:yes Call Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_poll_termination_signal", + "source_code": "def _poll_termination_signal(self):\n while True:\n if self._poll_termination_signal_thread_should_stop.is_set() or self._final_checkpoint_countdown:\n return\n if self._termination_watcher_fn():\n break\n time.sleep(1)\n self._maybe_set_received_own_sigterm()", + "docstring": "Poll maintenance notice and notify peers if receiving one.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py", + "ast_data": "FunctionDef name:_poll_termination_signal arg:self arguments arg While If BoolOp Call Return return:no If Call Call Call" + }, + { + "library": "tensorflow", + "name": "register_feature_column", + "source_code": "def register_feature_column(fc):\n _FEATURE_COLUMNS.append(fc)\n return fc", + "docstring": "Decorator that registers a FeatureColumn for serialization.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\serialization.py", + "ast_data": "FunctionDef name:register_feature_column arg:fc arguments arg Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_DenseColumn", + "source_code": "class _DenseColumn(_FeatureColumn):\n\n @abc.abstractproperty\n def _variable_shape(self):\n pass\n\n @abc.abstractmethod\n def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n pass", + "docstring": "Represents a column which can be represented as . 
WARNING: Do not subclass this layer unless you know what you are doing: the API is subject to future changes. Some examples of this type are: numeric_column, embedding_column, indicator_column.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", + "ast_data": "ClassDef name:_DenseColumn FunctionDef name:_variable_shape arg:self arguments arg FunctionDef name:_get_dense_tensor arg:self arg:inputs arg:weight_collections arg:trainable arguments arg arg arg arg" + }, + { + "library": "pytorch", + "name": "set_standalone_module_name", + "source_code": "def set_standalone_module_name(self, module_name: str, qconfig_mapping: Optional[QConfigMapping], example_inputs: tuple[Any, ...], prepare_custom_config: Optional[PrepareCustomConfig], backend_config: Optional[BackendConfig]) -> PrepareCustomConfig:\n self.standalone_module_names[module_name] = StandaloneModuleConfigEntry(qconfig_mapping, example_inputs, prepare_custom_config, backend_config)\n return self", + "docstring": "Set the configuration for running a standalone module identified by `` will be used instead.", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py", + "ast_data": "FunctionDef name:set_standalone_module_name arg:self arg:module_name arg:qconfig_mapping arg:example_inputs arg:prepare_custom_config arg:backend_config arguments arg arg arg arg arg arg Assign Call Return return:yes" + }, + { + "library": "pytorch", + "name": "rmse", + "source_code": "def rmse(ref, res):\n return torch.sqrt(torch.mean(torch.square(ref - res)))", + "docstring": "Calculate root mean squared error", + "type": "function", + "file_path": "pytorch\\torch\\_dynamo\\utils.py", + "ast_data": "FunctionDef name:rmse arg:ref arg:res arguments arg arg Return return:yes Call Call Call" + }, + { + "library": "scikit-learn", + "name": "_encode_y", + "source_code": "@abstractmethod\ndef _encode_y(self, y=None, sample_weight=None):\n pass", + "docstring": "Called by fit to validate and encode y.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", + "ast_data": "FunctionDef name:_encode_y arg:self arg:y arg:sample_weight arguments arg arg arg" + }, + { + "library": "tensorflow", + "name": "_calc_batch_mat_mul_flops", + "source_code": "@ops.RegisterStatistics('BatchMatMul', 'flops')\n@ops.RegisterStatistics('BatchMatMulV2', 'flops')\n@ops.RegisterStatistics('BatchMatMulV3', 'flops')\ndef _calc_batch_mat_mul_flops(graph, node):\n transpose_a = node.attr['transpose_a'].b\n a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n a_shape.assert_is_fully_defined()\n if transpose_a:\n k = int(a_shape[-2])\n else:\n k = int(a_shape[-1])\n output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n output_shape.assert_is_fully_defined()\n output_count = np.prod(output_shape.as_list())\n return ops.OpStats('flops', k * output_count * 2)", + "docstring": "Calculates the compute resources needed for BatchMatMul.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py", + "ast_data": "FunctionDef name:_calc_batch_mat_mul_flops arg:graph arg:node arguments arg arg Assign Assign Call Call If Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes Call Call Call Call" + }, + { + "library": "tensorflow", + "name": "_general_purpose_scan", + "source_code": "def _general_purpose_scan(ds, init_state, body):\n from tensorflow.python.data.ops import scan_op\n 
return scan_op._ScanDataset(ds, init_state, body, use_default_device=False)", + "docstring": "Variant of Dataset.scan with semantics of general-purpose computation.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_autograph.py", + "ast_data": "FunctionDef name:_general_purpose_scan arg:ds arg:init_state arg:body arguments arg arg arg Return return:yes Call" + }, + { + "library": "pytorch", + "name": "_forward_unimplemented", + "source_code": "def _forward_unimplemented(self, *input: Any) -> None:\n raise NotImplementedError(f'Module [{type(self).__name__}] is missing the required \"forward\" function')", + "docstring": "Define the computation performed at every call. Should be overridden by all subclasses. .. note:: Although the recipe for forward pass needs to be defined within this function, one should call the :class: instance afterwards instead of this since the former takes care of running the registered hooks while the latter silently ignores them.", + "type": "function", + "file_path": "pytorch\\torch\\nn\\modules\\module.py", + "ast_data": "FunctionDef name:_forward_unimplemented arg:self arguments arg arg Raise Call Call" + }, + { + "library": "tensorflow", + "name": "__call__", + "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n _validate_kwargs(self.__class__.__name__, kwargs)\n dtype = _get_dtype(dtype)\n if not dtype.is_numpy_compatible or dtype == dtypes.string:\n raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype)\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n return array_ops.ones(shape, dtype)", + "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. If not specified, is used, which default to unless you configured it otherwise (via ). **kwargs: Additional keyword arguments.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py", + "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If BoolOp Compare Raise Call If Compare Assign Return return:yes Call" + }, + { + "library": "pandas", + "name": "_akima_interpolate", + "source_code": "def _akima_interpolate(xi: np.ndarray, yi: np.ndarray, x: np.ndarray, der: int | list[int] | None=0, axis: AxisInt=0):\n from scipy import interpolate\n P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)\n return P(x, nu=der)", + "docstring": "Convenience function for akima interpolation. xi and yi are arrays of values used to approximate some function f, with `Akima1DInterpolatoryixi`. If N-D array, use axis parameter to select correct axis. x : np.ndarray Of length M. der : int, optional How many derivatives to extract; None for all potentially nonzero derivatives (that is a number equal to the number of points), or a list of derivatives to extract. This number includes the function value as 0th derivative. axis : int, optional Axis in the yi array corresponding to the x-coordinate values. 
See Also -------- scipy.interpolate.Akima1DInterpolator Returns ------- y : scalar or array-like The result, of length R or length M or M by R,", + "type": "function", + "file_path": "pandas\\pandas\\core\\missing.py", + "ast_data": "FunctionDef name:_akima_interpolate arg:xi arg:yi arg:x arg:der arg:axis arguments arg arg arg arg arg Assign Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_update_notebook", + "source_code": "def _update_notebook(original_notebook, original_raw_lines, updated_code_lines):\n new_notebook = copy.deepcopy(original_notebook)\n assert len(original_raw_lines) == len(updated_code_lines), 'The lengths of input and converted files are not the same: {} vs {}'.format(len(original_raw_lines), len(updated_code_lines))\n code_cell_idx = 0\n for cell in new_notebook['cells']:\n if not is_python(cell):\n continue\n applicable_lines = [idx for idx, code_line in enumerate(original_raw_lines) if code_line.cell_number == code_cell_idx]\n new_code = [updated_code_lines[idx] for idx in applicable_lines]\n cell['source'] = '\\n'.join(new_code).replace('###!!!', '').replace('###===', '\\n')\n code_cell_idx += 1\n return new_notebook", + "docstring": "Updates notebook, once migration is done.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ipynb.py", + "ast_data": "FunctionDef name:_update_notebook arg:original_notebook arg:original_raw_lines arg:updated_code_lines arguments arg arg arg Assign Call Compare Call Call Call Call Call Assign For If Call Assign Call Compare Assign Assign Call Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "__init__", + "source_code": "def __init__(self) -> None:\n self._registered = False\n self._execution_trace_running = False\n self.extra_resources_collection = False\n self.resources_dir: str = ''\n self.output_file_path: str = ''\n self.output_file_path_observer: str = ''", + "docstring": "Initializes the default states.", + "type": "method", + "file_path": "pytorch\\torch\\profiler\\profiler.py", + "ast_data": "FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign" + }, + { + "library": "scikit-learn", + "name": "is_non_default", + "source_code": "def is_non_default(param_name, param_value):\n if param_name not in init_default_params:\n return True\n if init_default_params[param_name] == inspect._empty:\n return True\n if isinstance(param_value, BaseEstimator) and type(param_value) is not type(init_default_params[param_name]):\n return True\n if param_value != init_default_params[param_name] and (not (is_scalar_nan(init_default_params[param_name]) and is_scalar_nan(param_value))):\n return True\n return False", + "docstring": "Finds the parameters that have been set by the user.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\base.py", + "ast_data": "FunctionDef name:is_non_default arg:param_name arg:param_value arguments arg arg If Compare Return return:yes If Compare Return return:yes If BoolOp Call Compare Call Call Return return:yes If BoolOp Compare BoolOp Call Call Return return:yes Return return:yes" + }, + { + "library": "kornia", + "name": "xmax", + "source_code": "@property\ndef xmax(self) -> torch.Tensor:\n return self._data[..., 2]", + "docstring": "The bounding box bottom-right x-coordinate.", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\face_detection.py", + "ast_data": "FunctionDef name:xmax arg:self arguments arg Return return:yes" + }, + { + "library": "pytorch", + "name": "as_integer_ratio", + 
"source_code": "def as_integer_ratio(self) -> tuple[builtins.int, builtins.int]:\n return builtins.float(self).as_integer_ratio()", + "docstring": "Represent this float as an exact integer ratio", + "type": "method", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:as_integer_ratio arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "_calculate_outlier_info", + "source_code": "def _calculate_outlier_info(self, percentile_ratios: torch.Tensor, counted_batches: torch.Tensor, total_batches: int) -> dict[str, list[bool]]:\n outlier_dict: dict[str, list[bool]] = {self.OUTLIER_KEY: [], self.IS_SUFFICIENT_BATCHES_KEY: []}\n ratios_list: list = percentile_ratios.tolist()\n num_batches_list: list = counted_batches.tolist()\n significant_size = [batch_size / total_batches >= self.fraction_batches_used_threshold for batch_size in num_batches_list]\n outlier_dict[self.IS_SUFFICIENT_BATCHES_KEY] = significant_size\n outlier_detected = [ratio > self.ratio_threshold for ratio in ratios_list]\n outlier_dict[self.OUTLIER_KEY] = outlier_detected\n return outlier_dict", + "docstring": "Gives info on whether the percentile ratios calculated would be considered outliers Also gives information on whether the collected data is statistically significant to make this claim Args: percentile_ratios (torch.Tensor): The average percentile_ratios per channel calculated by the observer counted_batches (torch.Tensor): The number of batches used for average calculation per tensor total_batches (int): The total number of batches that passed through observer in this epoch Returns a dictionary mapping: \"outliers_detected\" : list of bools per channel that are true if it is considered an outlier \"is_sufficient_batches\": if o_r was >= fraction_batches_used_threshold: where o_r = counted_batches / total_batches", + "type": "method", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", + "ast_data": "FunctionDef name:_calculate_outlier_info arg:self arg:percentile_ratios arg:counted_batches arg:total_batches arguments arg arg arg arg Call Call Assign Compare Assign Assign Compare Assign Return return:yes" + }, + { + "library": "pytorch", + "name": "check_warn_on_unable_to_start_executing", + "source_code": "def check_warn_on_unable_to_start_executing(self, function_id: FunctionID) -> None:\n if function_id in self.warned_functions or not self.in_new_torch_compile_invocation():\n return\n assert self.current_node is not None\n existing_nodes = [node for node in self.current_node._path_from_root if node.wrapped_function.id == function_id]\n if len(existing_nodes) <= 1:\n return\n parents = OrderedSet([n.parent.wrapped_function.id for n in itertools.chain(existing_nodes, (self.current_node,)) if n.parent is not None])\n if len(parents) == len(existing_nodes):\n return\n self.warned_functions.add(function_id)\n warnings.warn('Unable to hit fast path of CUDAGraphs because of pending, uninvoked backwards. 
Consider running with torch.no_grad() or using torch.compiler.cudagraph_mark_step_begin() before each model invocation')", + "docstring": "Warn if we in a potential loop where we are unable to hit fast path", + "type": "method", + "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", + "ast_data": "FunctionDef name:check_warn_on_unable_to_start_executing arg:self arg:function_id arguments arg arg If BoolOp Compare Call Return return:no Compare Assign Compare If Compare Call Return return:no Assign Call Call Compare If Compare Call Call Return return:no Call Call" + }, + { + "library": "pytorch", + "name": "TimeitModuleType", + "source_code": "@runtime_checkable\nclass TimeitModuleType(Protocol):\n\n def timeit(self, number: int) -> float:\n ...", + "docstring": "Modules generated from .", + "type": "class", + "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\_stubs.py", + "ast_data": "ClassDef name:TimeitModuleType FunctionDef name:timeit arg:self arg:number arguments arg arg" + }, + { + "library": "tensorflow", + "name": "get_sequence_dense_tensor", + "source_code": "def get_sequence_dense_tensor(self, transformation_cache, state_manager):\n sp_tensor = transformation_cache.get(self, state_manager)\n dense_tensor = sparse_ops.sparse_tensor_to_dense(sp_tensor, default_value=self.default_value)\n dense_shape = array_ops.concat([array_ops.shape(dense_tensor)[:1], [-1], self.variable_shape], axis=0)\n dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape)\n if sp_tensor.shape.ndims == 2:\n num_elements = self.variable_shape.num_elements()\n else:\n num_elements = 1\n seq_length = fc_utils.sequence_length_from_sparse_tensor(sp_tensor, num_elements=num_elements)\n return fc.SequenceDenseColumn.TensorSequenceLengthPair(dense_tensor=dense_tensor, sequence_length=seq_length)", + "docstring": "Returns a . Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py", + "ast_data": "FunctionDef name:get_sequence_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call Assign Call Assign Call Call Assign Call If Compare Assign Call Assign Assign Call Return return:yes Call" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, xy, s, size=None, prop=None, _interpolation_steps=1, usetex=False):\n from matplotlib.text import Text\n prop = FontProperties._from_any(prop)\n if size is None:\n size = prop.get_size_in_points()\n self._xy = xy\n self.set_size(size)\n self._cached_vertices = None\n s, ismath = Text(usetex=usetex)._preprocess_math(s)\n super().__init__(*text_to_path.get_text_path(prop, s, ismath=ismath), _interpolation_steps=_interpolation_steps, readonly=True)\n self._should_simplify = False", + "docstring": "Create a path from the text. Note that it simply is a path, not an artist. You need to use the (or other artists) to draw this path onto the canvas. Parameters ---------- xy : tuple or array of two float values Position of the text. 
For no offset, use `~matplotlib.font_manager.FontProperties.FontPropertiesrcParams/gallery/text_labels_and_annotations/demo_text_path`.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\textpath.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:xy arg:s arg:size arg:prop arg:_interpolation_steps arg:usetex arguments arg arg arg arg arg arg arg Assign Call If Compare Assign Call Assign Call Assign Assign Call Call Call Call Call Assign" + }, + { + "library": "kornia", + "name": "rgb_to_xyz", + "source_code": "def rgb_to_xyz(image: Tensor) -> Tensor:\n if not isinstance(image, Tensor):\n raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n r: Tensor = image[..., 0, :, :]\n g: Tensor = image[..., 1, :, :]\n b: Tensor = image[..., 2, :, :]\n x: Tensor = 0.412453 * r + 0.35758 * g + 0.180423 * b\n y: Tensor = 0.212671 * r + 0.71516 * g + 0.072169 * b\n z: Tensor = 0.019334 * r + 0.119193 * g + 0.950227 * b\n out: Tensor = torch.stack([x, y, z], -3)\n return out", + "docstring": "Convert a RGB image to XYZ. .. image:: _static/img/rgb_to_xyz.png Args: image: RGB Image to be converted to XYZ with shape :math:. Returns: XYZ version of the image with shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_xyz(input) # 2x3x4x5", + "type": "function", + "file_path": "kornia\\kornia\\color\\xyz.py", + "ast_data": "FunctionDef name:rgb_to_xyz arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "lift_subgraph_as_module", + "source_code": "@compatibility(is_backward_compatible=False)\ndef lift_subgraph_as_module(gm: GraphModule, subgraph: Graph, comp_name: str='', class_name: str='GraphModule') -> tuple[GraphModule, dict[str, str]]:\n submodule = HolderModule({})\n orig_to_split_fqn_mapping: dict[str, str] = {}\n for n in subgraph.nodes:\n if n.op not in ('call_module', 'get_attr'):\n continue\n target = n.target\n assert isinstance(target, str)\n target_name_parts = target.split('.')\n curr = submodule\n orig_gm = gm\n for name in target_name_parts[:-1]:\n if not hasattr(curr, name):\n curr.add_module(name, HolderModule({}))\n curr = getattr(curr, name)\n orig_gm = getattr(orig_gm, name)\n leaf_node_name = target_name_parts[-1]\n leaf_node = getattr(orig_gm, leaf_node_name)\n orig_to_split_fqn_mapping[target] = f'{comp_name}.{target}'\n setattr(curr, leaf_node_name, leaf_node)\n return (GraphModule(submodule, subgraph, class_name), orig_to_split_fqn_mapping)", + "docstring": "Create a GraphModule for subgraph, which copies the necessary attributes from the original parent graph_module. 
Args: gm (GraphModule): parent graph module subgraph (Graph): a valid subgraph that contains copied nodes from the parent graph comp_name (str): name for the new component class_name (str): name for the submodule", + "type": "function", + "file_path": "pytorch\\torch\\fx\\passes\\utils\\common.py", + "ast_data": "FunctionDef name:lift_subgraph_as_module arg:gm arg:subgraph arg:comp_name arg:class_name arguments arg arg arg arg Assign Call For If Compare Assign Call Assign Call Assign Assign For If Call Call Call Assign Call Assign Call Assign Assign Call Assign Call Return return:yes Call Call" + }, + { + "library": "kornia", + "name": "data", + "source_code": "@property\ndef data(self) -> Tensor:\n return self._data", + "docstring": "Return the underlying data with shape :math:.", + "type": "method", + "file_path": "kornia\\kornia\\geometry\\quaternion.py", + "ast_data": "FunctionDef name:data arg:self arguments arg Return return:yes" + }, + { + "library": "matplotlib", + "name": "__init__", + "source_code": "def __init__(self, freq, tzinfo=None, **kwargs):\n kwargs['freq'] = freq\n self._base_tzinfo = tzinfo\n self._update_rrule(**kwargs)", + "docstring": "Parameters ---------- freq : {YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY} Tick frequency. These constants are defined in , but they are accessible from as well. tzinfo : , optional Time zone information. The default is None. **kwargs Additional keyword arguments are passed to the .", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\dates.py", + "ast_data": "FunctionDef name:__init__ arg:self arg:freq arg:tzinfo arguments arg arg arg arg Assign Assign Call" + }, + { + "library": "matplotlib", + "name": "HandlerPathCollection", + "source_code": "class HandlerPathCollection(HandlerRegularPolyCollection):\n\n def create_collection(self, orig_handle, sizes, offsets, offset_transform):\n return type(orig_handle)([orig_handle.get_paths()[0]], sizes=sizes, offsets=offsets, offset_transform=offset_transform)", + "docstring": "Handler for \\s, which are used by .", + "type": "class", + "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py", + "ast_data": "ClassDef name:HandlerPathCollection FunctionDef name:create_collection arg:self arg:orig_handle arg:sizes arg:offsets arg:offset_transform arguments arg arg arg arg arg Return return:yes Call Call Call" + }, + { + "library": "tensorflow", + "name": "is_ragged", + "source_code": "def is_ragged(self, axis):\n if not isinstance(axis, int):\n raise TypeError('axis must be an integer')\n rank = self.rank\n if axis < 0:\n raise ValueError('Negative axis values are not supported')\n elif rank is not None and axis >= rank:\n raise ValueError('Expected axis=%s < rank=%s' % (axis, rank))\n else:\n return axis > 0 and axis < len(self._partitioned_dim_sizes) and (self._partitioned_dim_sizes[axis].shape.ndims == 1)", + "docstring": "Returns true if the indicated dimension is ragged.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py", + "ast_data": "FunctionDef name:is_ragged arg:self arg:axis arguments arg arg If Call Raise Call Assign If Compare Raise Call If BoolOp Compare Compare Raise Call Return return:yes BoolOp Compare Compare Call Compare" + }, + { + "library": "scipy", + "name": "Python", + "source_code": "@cli.cls_cmd('python')\nclass Python:\n ctx = CONTEXT\n pythonpath = Option(['--pythonpath', '-p'], metavar='PYTHONPATH', default=None, help='Paths to prepend to PYTHONPATH')\n extra_argv = 
Argument(['extra_argv'], nargs=-1, metavar='ARGS', required=False)\n\n @classmethod\n def _setup(cls, pythonpath, **kwargs):\n vals = Build.opt_defaults()\n vals.update(kwargs)\n Build.run(add_path=True, **vals)\n if pythonpath:\n for p in reversed(pythonpath.split(os.pathsep)):\n sys.path.insert(0, p)\n\n @classmethod\n def run(cls, pythonpath, extra_argv=None, **kwargs):\n cls._setup(pythonpath, **kwargs)\n if extra_argv:\n sys.argv = extra_argv\n with open(extra_argv[0]) as f:\n script = f.read()\n sys.modules['__main__'] = new_module('__main__')\n ns = dict(__name__='__main__', __file__=extra_argv[0])\n exec(script, ns)\n else:\n import code\n code.interact()", + "docstring": ":wrench: Start a Python shell with PYTHONPATH set. ARGS: Arguments passed to the Python interpreter. If not set, an interactive shell is launched. Running is equivalent to: 1. Execute build command (skip by passing the global option). 2. Set the PYTHONPATH environment variable (query with ). 3. Run interpreter:", + "type": "class", + "file_path": "scipy\\dev.py", + "ast_data": "ClassDef name:Python Assign Assign Call Assign Call FunctionDef name:_setup arg:cls arg:pythonpath arguments arg arg arg Assign Call Call Call If For Call Call Call FunctionDef name:run arg:cls arg:pythonpath arg:extra_argv arguments arg arg arg arg Call If Assign With Call Assign Call Assign Call Assign Call Call Call Call" + }, + { + "library": "scikit-learn", + "name": "DensityMixin", + "source_code": "class DensityMixin:\n _estimator_type = 'DensityEstimator'\n\n def __sklearn_tags__(self):\n tags = super().__sklearn_tags__()\n tags.estimator_type = 'density_estimator'\n return tags\n\n def score(self, X, y=None):\n pass", + "docstring": "Mixin class for all density estimators in scikit-learn. This mixin defines the following functionality: - sets estimator type to through the tag; - method that default that do no-op. Examples -------- >>> from sklearn.base import DensityMixin >>> class MyEstimator(DensityMixin): ... def fit(self, X, y=None): ... self.is_fitted_ = True ... 
return self >>> estimator = MyEstimator() >>> hasattr(estimator, \"score\") True", + "type": "class", + "file_path": "scikit-learn\\sklearn\\base.py", + "ast_data": "ClassDef name:DensityMixin Assign FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign Return return:yes FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg" + }, + { + "library": "django", + "name": "get_filters_params", + "source_code": "def get_filters_params(self, params=None):\n params = params or self.filter_params\n lookup_params = params.copy()\n for ignored in IGNORED_PARAMS:\n if ignored in lookup_params:\n del lookup_params[ignored]\n return lookup_params", + "docstring": "Return all params except IGNORED_PARAMS.", + "type": "method", + "file_path": "django\\django\\contrib\\admin\\views\\main.py", + "ast_data": "FunctionDef name:get_filters_params arg:self arg:params arguments arg arg Assign BoolOp Assign Call For If Compare Return return:yes" + }, + { + "library": "pytorch", + "name": "CacheInfo", + "source_code": "@dataclasses.dataclass\nclass CacheInfo:\n artifacts: defaultdict[str, list[str]] = dataclasses.field(default_factory=lambda: defaultdict(list))\n\n @property\n def inductor_artifacts(self) -> list[str]:\n ...\n\n @property\n def autotune_artifacts(self) -> list[str]:\n ...\n\n @property\n def aot_autograd_artifacts(self) -> list[str]:\n ...\n\n @property\n def pgo_artifacts(self) -> list[str]:\n ...\n\n def add(self, artifact: CacheArtifact) -> None:\n self.artifacts[artifact.type()].append(artifact.key)\n\n def clear(self) -> None:\n self.artifacts.clear()\n\n def empty(self) -> bool:\n return not self.artifacts", + "docstring": "Return value of serialization and deserialization for the purpose of instrumentation", + "type": "class", + "file_path": "pytorch\\torch\\compiler\\_cache.py", + "ast_data": "ClassDef name:CacheInfo Call arguments Call FunctionDef name:inductor_artifacts arg:self arguments arg FunctionDef name:autotune_artifacts arg:self arguments arg FunctionDef name:aot_autograd_artifacts arg:self arguments arg FunctionDef name:pgo_artifacts arg:self arguments arg FunctionDef name:add arg:self arg:artifact arguments arg arg Call Call FunctionDef name:clear arg:self arguments arg Call FunctionDef name:empty arg:self arguments arg Return return:yes" + }, + { + "library": "kornia", + "name": "from_name", + "source_code": "@staticmethod\ndef from_name(model_name: str, num_classes: int=80) -> RTDETR:\n model = RTDETR.from_config(RTDETRConfig.from_name(model_name, num_classes))\n return model", + "docstring": "Load model without pretrained weights. Args: model_name: 'rtdetr_r18vd', 'rtdetr_r34vd', 'rtdetr_r50vd_m', 'rtdetr_r50vd', 'rtdetr_r101vd'. 
num_classes: number of classes to detect.", + "type": "method", + "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\model.py", + "ast_data": "FunctionDef name:from_name arg:model_name arg:num_classes arguments arg arg Assign Call Call Return return:yes" + }, + { + "library": "pytorch", + "name": "hex", + "source_code": "def hex(self) -> str:\n return self.node.guard_float('', 0).hex()", + "docstring": "Returns the hexadecimal representation of the float.", + "type": "method", + "file_path": "pytorch\\torch\\__init__.py", + "ast_data": "FunctionDef name:hex arg:self arguments arg Return return:yes Call Call" + }, + { + "library": "pytorch", + "name": "StrobelightCLIProfilerError", + "source_code": "class StrobelightCLIProfilerError(Exception):\n pass", + "docstring": "Raised when an error happens during strobelight profiling", + "type": "class", + "file_path": "pytorch\\torch\\utils\\_strobelight\\cli_function_profiler.py", + "ast_data": "ClassDef name:StrobelightCLIProfilerError" + }, + { + "library": "scikit-learn", + "name": "get_n_splits", + "source_code": "def get_n_splits(self, X=None, y=None, groups=None):\n return self.n_splits", + "docstring": "Returns the number of splitting iterations in the cross-validator. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator.", + "type": "method", + "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", + "ast_data": "FunctionDef name:get_n_splits arg:self arg:X arg:y arg:groups arguments arg arg arg arg Return return:yes" + }, + { + "library": "authlib", + "name": "check_permission", + "source_code": "def check_permission(self, token, client, request):\n raise NotImplementedError()", + "docstring": "Check if the request has permission to introspect the token. Developers MUST implement this method:: def check_permission(self, token, client, request): # only allow a special client to introspect the token return client.client_id == \"introspection_client\" :return: bool", + "type": "method", + "file_path": "authlib\\authlib\\oauth2\\rfc7662\\introspection.py", + "ast_data": "FunctionDef name:check_permission arg:self arg:token arg:client arg:request arguments arg arg arg arg Raise Call" + }, + { + "library": "matplotlib", + "name": "to_html5_video", + "source_code": "def to_html5_video(self, embed_limit=None):\n VIDEO_TAG = ''\n if not hasattr(self, '_base64_video'):\n embed_limit = mpl._val_or_rc(embed_limit, 'animation.embed_limit')\n embed_limit *= 1024 * 1024\n with TemporaryDirectory() as tmpdir:\n path = Path(tmpdir, 'temp.m4v')\n Writer = writers[mpl.rcParams['animation.writer']]\n writer = Writer(codec='h264', bitrate=mpl.rcParams['animation.bitrate'], fps=1000.0 / self._interval)\n self.save(str(path), writer=writer)\n vid64 = base64.encodebytes(path.read_bytes())\n vid_len = len(vid64)\n if vid_len >= embed_limit:\n _log.warning(\"Animation movie is %s bytes, exceeding the limit of %s. 
If you're sure you want a large animation embedded, set the animation.embed_limit rc parameter to a larger value (in MB).\", vid_len, embed_limit)\n else:\n self._base64_video = vid64.decode('ascii')\n self._video_size = 'width=\"{}\" height=\"{}\"'.format(*writer.frame_size)\n if hasattr(self, '_base64_video'):\n options = ['controls', 'autoplay']\n if getattr(self, '_repeat', False):\n options.append('loop')\n return VIDEO_TAG.format(video=self._base64_video, size=self._video_size, options=' '.join(options))\n else:\n return 'Video too large to embed.'", + "docstring": "Convert the animation to an HTML5 ```animation.writeranimation.bitrateanimation.embed_limit` = 20.0. Returns ------- str An HTML5 video tag with the animation embedded as base64 encoded h264 video. If the *embed_limit* is exceeded, this returns the string \"Video too large to embed.\"", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\animation.py", + "ast_data": "FunctionDef name:to_html5_video arg:self arg:embed_limit arguments arg arg Assign If Call Assign Call With Call Assign Call Assign Assign Call Call Call Assign Call Call Assign Call If Compare Call Assign Call Assign Call If Call Assign If Call Call Return return:yes Call Call Return return:yes" + }, + { + "library": "pandas", + "name": "_validate_key", + "source_code": "def _validate_key(self, key, axis: AxisInt) -> None:\n raise AbstractMethodError(self)", + "docstring": "Ensure that key is valid for current indexer. Parameters ---------- key : scalar, slice or list-like Key requested. axis : int Dimension on which the indexing is being made. Raises ------ TypeError If the key (or some element of it) has wrong type. IndexError If the key (or some element of it) is out of bounds. KeyError If the key was not found.", + "type": "method", + "file_path": "pandas\\pandas\\core\\indexing.py", + "ast_data": "FunctionDef name:_validate_key arg:self arg:key arg:axis arguments arg arg arg Raise Call" + }, + { + "library": "scikit-learn", + "name": "_predict_proba", + "source_code": "def _predict_proba(lr, X):\n pred = safe_sparse_dot(X, lr.coef_.T)\n if hasattr(lr, 'intercept_'):\n pred += lr.intercept_\n return softmax(pred)", + "docstring": "Predict proba for lightning for n_classes >=3.", + "type": "function", + "file_path": "scikit-learn\\benchmarks\\bench_saga.py", + "ast_data": "FunctionDef name:_predict_proba arg:lr arg:X arguments arg arg Assign Call If Call Return return:yes Call" + }, + { + "library": "tensorflow", + "name": "_create_device_array", + "source_code": "def _create_device_array(shape, device_type, host_id, local_device_ids=None):\n num_global_devices = config.num_global_devices(device_type)\n global_device_ids = np.arange(num_global_devices).reshape(shape)\n local_device_list = config.local_devices(device_type)\n num_local_devices = len(local_device_list)\n local_device_ids = [x + host_id * num_local_devices for x in range(num_local_devices)] if not local_device_ids else local_device_ids\n return (global_device_ids, local_device_ids, local_device_list)", + "docstring": "Returns ID and device lists that can be used to create a mesh.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\dtensor\\python\\tpu_util.py", + "ast_data": "FunctionDef name:_create_device_array arg:shape arg:device_type arg:host_id arg:local_device_ids arguments arg arg arg arg Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes" + }, + { + "library": "django", + "name": "reset_urlconf", + "source_code": "def 
reset_urlconf(sender, **kwargs):\n set_urlconf(None)", + "docstring": "Reset the URLconf after each request is finished.", + "type": "function", + "file_path": "django\\django\\core\\handlers\\base.py", + "ast_data": "FunctionDef name:reset_urlconf arg:sender arguments arg arg Call" + }, + { + "library": "tensorflow", + "name": "_sparse_false_negative_at_k", + "source_code": "def _sparse_false_negative_at_k(labels, predictions_idx, class_id=None, weights=None):\n with ops.name_scope(None, 'false_negatives', (predictions_idx, labels, weights)):\n labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx, class_id)\n fn = sets.set_size(sets.set_difference(predictions_idx, labels, aminusb=False))\n fn = math_ops.cast(fn, dtypes.float64)\n if weights is not None:\n with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(weights, fn),)):\n weights = math_ops.cast(weights, dtypes.float64)\n fn = math_ops.multiply(fn, weights)\n return fn", + "docstring": "Calculates false negatives for recall@k. If is specified, calculate binary true positives for only. If is not specified, calculate metrics for predicted vs label classes, where is the 2nd dimension of . Args: labels: or with shape [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of target classes for the associated prediction. Commonly, N=1 and has shape [batch_size, num_labels]. [D1, ... DN] must match . predictions_idx: 1-D or higher with last dimension , top predicted classes. For rank , the first dimensions must match . class_id: Class for which we want binary metrics. weights: whose rank is either 0, or n-1, where n is the rank of . If the latter, it must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). Returns: A [D1, ... DN] of false negative counts.", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", + "ast_data": "FunctionDef name:_sparse_false_negative_at_k arg:labels arg:predictions_idx arg:class_id arg:weights arguments arg arg arg arg With Call Assign Call Assign Call Call Assign Call If Compare With Call Call Assign Call Assign Call Return return:yes" + }, + { + "library": "tensorflow", + "name": "_fix_fdef_in_place", + "source_code": "def _fix_fdef_in_place(fdef, functions, shared_name_suffix, new_gradient_op_types):\n orig_name = fdef.signature.name\n contains_unsaved_custom_gradients = False\n for node_def in fdef.node_def:\n fix_node_def(node_def, functions, shared_name_suffix)\n op_type = _get_gradient_op_type(node_def)\n if op_type is not None:\n if op_type in new_gradient_op_types:\n node_def.attr['_gradient_op_type'].s = compat.as_bytes(new_gradient_op_types[op_type])\n else:\n contains_unsaved_custom_gradients = True\n if contains_unsaved_custom_gradients:\n logging.warning('Importing a function (%s) with ops with unsaved custom gradients. Will likely fail if a gradient is requested.', fdef.signature.name)\n fdef.signature.name = _clean_function_name(fdef.signature.name)\n return orig_name", + "docstring": "Fixes a FunctionDef proto to be loaded in current context. In particular, when loading a function library into an eager context, one must rename the functions to avoid conflicts with existent functions. Args: fdef: FunctionDef proto to fix. It is mutated in-place. functions: map from function name to a ConcreteFunction instance. shared_name_suffix: A unique string for this load which helps to avoid collisions across loads. 
Two functions from the same load using the same still need to share, but functions from different loads with the same should not. new_gradient_op_types: map from old gradient op type to newly generated op type. Returns: orig_name: original value of fdef.signature.name", + "type": "function", + "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py", + "ast_data": "FunctionDef name:_fix_fdef_in_place arg:fdef arg:functions arg:shared_name_suffix arg:new_gradient_op_types arguments arg arg arg arg Assign Assign For Call Assign Call If Compare If Compare Assign Call Assign If Call Assign Call Return return:yes" + }, + { + "library": "scipy", + "name": "_attach_methods", + "source_code": "def _attach_methods(self):\n self._attach_argparser_methods()", + "docstring": "Attaches dynamically created argparser methods.", + "type": "method", + "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", + "ast_data": "FunctionDef name:_attach_methods arg:self arguments arg Call" + }, + { + "library": "pytorch", + "name": "_maybe_insert_input_observers_for_node", + "source_code": "def _maybe_insert_input_observers_for_node(node: Node, qconfig: QConfigAny, model: torch.nn.Module, named_modules: dict[str, torch.nn.Module], graph: Graph, qhandler: Optional[QuantizeHandler], prepare_custom_config: PrepareCustomConfig, obs_or_fq_map: dict[EdgeOrNode, ObserverOrFakeQuantize], is_qat: bool, backend_config: Optional[BackendConfig]=None) -> None:\n new_args = []\n for arg in node.args:\n new_arg = _maybe_insert_input_observer_for_arg_or_kwarg(node, arg, qconfig, model, named_modules, graph, qhandler, prepare_custom_config, obs_or_fq_map, is_qat, backend_config)\n new_args.append(new_arg)\n new_kwargs = {}\n for k, kwarg in node.kwargs.items():\n new_kwarg = _maybe_insert_input_observer_for_arg_or_kwarg(node, kwarg, qconfig, model, named_modules, graph, qhandler, prepare_custom_config, obs_or_fq_map, is_qat, backend_config)\n new_kwargs[k] = new_kwarg\n node.args = tuple(new_args)\n node.kwargs = new_kwargs", + "docstring": "If needed, inserts observers to the input args and kwargs of . Note: modifies inplace. For example, if cur_node needs an observer after prev_node, we change from prev_node -> cur_node To prev_node -> obs -> cur_node Note: backend_config only needed for standalone_module node", + "type": "function", + "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py", + "ast_data": "FunctionDef name:_maybe_insert_input_observers_for_node arg:node arg:qconfig arg:model arg:named_modules arg:graph arg:qhandler arg:prepare_custom_config arg:obs_or_fq_map arg:is_qat arg:backend_config arguments arg arg arg arg arg arg arg arg arg arg Assign For Assign Call Call Assign For Call Assign Call Assign Assign Call Assign" + }, + { + "library": "tensorflow", + "name": "LeakyReLU", + "source_code": "class LeakyReLU(Layer):\n\n def __init__(self, alpha=0.3, **kwargs):\n super(LeakyReLU, self).__init__(**kwargs)\n if alpha is None:\n raise ValueError('The alpha value of a Leaky ReLU layer cannot be None, needs a float. 
Got %s' % alpha)\n self.supports_masking = True\n self.alpha = backend.cast_to_floatx(alpha)\n\n def call(self, inputs):\n return backend.relu(inputs, alpha=self.alpha)\n\n def get_config(self):\n config = {'alpha': float(self.alpha)}\n base_config = super(LeakyReLU, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n return input_shape", + "docstring": "Leaky version of a Rectified Linear Unit. It allows a small gradient when the unit is not active: Usage: >>> layer = tf.keras.layers.LeakyReLU() >>> output = layer([-3.0, -1.0, 0.0, 2.0]) >>> list(output.numpy()) [-0.9, -0.3, 0.0, 2.0] >>> layer = tf.keras.layers.LeakyReLU(alpha=0.1) >>> output = layer([-3.0, -1.0, 0.0, 2.0]) >>> list(output.numpy()) [-0.3, -0.1, 0.0, 2.0] Input shape: Arbitrary. Use the keyword argument (tuple of integers, does not include the batch axis) when using this layer as the first layer in a model. Output shape: Same shape as the input. Args: alpha: Float >= 0. Negative slope coefficient. Default to 0.3.", + "type": "class", + "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\advanced_activations.py", + "ast_data": "ClassDef name:LeakyReLU FunctionDef name:__init__ arg:self arg:alpha arguments arg arg arg Call Call If Compare Raise Call Assign Assign Call FunctionDef name:call arg:self arg:inputs arguments arg arg Return return:yes Call FunctionDef name:get_config arg:self arguments arg Assign Call Assign Call Call Return return:yes Call Call Call Call Call FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Return return:yes" + }, + { + "library": "sphinx", + "name": "is_classmethod_like", + "source_code": "def is_classmethod_like(obj: Any, cls: Any=None, name: str | None=None) -> bool:\n return isclassmethod(obj, cls, name) or is_builtin_classmethod_like(obj, cls, name)", + "docstring": "Check if the object looks like a class method.", + "type": "function", + "file_path": "sphinx\\sphinx\\util\\inspect.py", + "ast_data": "FunctionDef name:is_classmethod_like arg:obj arg:cls arg:name arguments arg arg arg Return return:yes BoolOp Call Call" + }, + { + "library": "django", + "name": "_check_unique_together", + "source_code": "@classmethod\ndef _check_unique_together(cls):\n if not isinstance(cls._meta.unique_together, (tuple, list)):\n return [checks.Error(\"'unique_together' must be a list or tuple.\", obj=cls, id='models.E010')]\n elif any((not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together)):\n return [checks.Error(\"All 'unique_together' elements must be lists or tuples.\", obj=cls, id='models.E011')]\n else:\n errors = []\n for fields in cls._meta.unique_together:\n errors.extend(cls._check_local_fields(fields, 'unique_together'))\n return errors", + "docstring": "Check the value of \"unique_together\" option.", + "type": "method", + "file_path": "django\\django\\db\\models\\base.py", + "ast_data": "FunctionDef name:_check_unique_together arg:cls arguments arg If Call Return return:yes Call If Call Call Return return:yes Call Assign For Call Call Return return:yes" + }, + { + "library": "django", + "name": "I", + "source_code": "def I(self):\n if self.timezone is None:\n return ''\n return '1' if self.timezone.dst(self.data) else '0'", + "docstring": "'1' if daylight saving time, '0' otherwise.", + "type": "method", + "file_path": "django\\django\\utils\\dateformat.py", + "ast_data": "FunctionDef name:I arg:self arguments arg If 
Compare Return return:yes Return return:yes Call" + }, + { + "library": "numpy", + "name": "feature_test", + "source_code": "@_Cache.me\ndef feature_test(self, name, force_flags=None, macros=[]):\n if force_flags is None:\n force_flags = self.feature_flags(name)\n self.dist_log(\"testing feature '%s' with flags (%s)\" % (name, ' '.join(force_flags)))\n test_path = os.path.join(self.conf_check_path, 'cpu_%s.c' % name.lower())\n if not os.path.exists(test_path):\n self.dist_fatal('feature test file is not exist', test_path)\n test = self.dist_test(test_path, force_flags + self.cc_flags['werror'], macros=macros)\n if not test:\n self.dist_log('testing failed', stderr=True)\n return test", + "docstring": "Test a certain CPU feature against the compiler through its own check file. Parameters ---------- name : str Supported CPU feature name. force_flags : list or None, optional If None(default), the returned flags from will be used. macros : list of tuples, optional A list of C macro definitions.", + "type": "method", + "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py", + "ast_data": "FunctionDef name:feature_test arg:self arg:name arg:force_flags arg:macros arguments arg arg arg arg If Compare Assign Call Call Call Assign Call Call If Call Call Assign Call If Call Return return:yes" + }, + { + "library": "matplotlib", + "name": "transform_affine", + "source_code": "def transform_affine(self, values):\n return self.get_affine().transform(values)", + "docstring": "Apply only the affine part of this transformation on the given array of values. `~Transform.input_dims~Transform.input_dims~Transform.output_dims~Transform.output_dims`), depending on the input.", + "type": "method", + "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", + "ast_data": "FunctionDef name:transform_affine arg:self arg:values arguments arg arg Return return:yes Call Call" + }, + { + "library": "tensorflow", + "name": "pack", + "source_code": "def pack(self, tensors):\n self._assert_eager()\n if len(tensors) != len(self.components):\n raise ValueError('Creating a parallel tensor requires one tensor per component. Got {} but was expecting {}.'.format(len(tensors), len(self.components)))\n with ops.device(None):\n tensors = variable_utils.convert_variables_to_tensors(tensors)\n return nest.map_structure(self._pack_tensor, *tensors, expand_composites=True)", + "docstring": "Create a tensor on the parallel device from a sequence of tensors. Args: tensors: A list of tensors, one per device in . The list can contain composite tensors and nests (lists, dicts, etc. supported by ) with the same structure for each device, but every component of nests must already be a or composite. Passing objects reads their value, it does not share a mutable reference between the packed and unpacked forms. Returns: A tensor placed on the ParallelDevice. For nested structures, returns a single structure containing tensors placed on the ParallelDevice (same structure as each component of ). 
Raises: ValueError: If the length of does not match the number of component devices, or if there are non-tensor inputs.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\distribute\\parallel_device\\parallel_device.py", + "ast_data": "FunctionDef name:pack arg:self arg:tensors arguments arg arg Call If Compare Call Call Raise Call Call Call Call With Call Assign Call Return return:yes Call" + }, + { + "library": "pytorch", + "name": "run_and_read_all", + "source_code": "def run_and_read_all(run_lambda, command):\n rc, out, _ = run_lambda(command)\n if rc != 0:\n return None\n return out", + "docstring": "Run command using run_lambda; reads and returns entire output if rc is 0.", + "type": "function", + "file_path": "pytorch\\torch\\utils\\collect_env.py", + "ast_data": "FunctionDef name:run_and_read_all arg:run_lambda arg:command arguments arg arg Assign Call If Compare Return return:no Return return:yes" + }, + { + "library": "pandas", + "name": "rec_array_to_mgr", + "source_code": "def rec_array_to_mgr(data: np.rec.recarray | np.ndarray, index, columns, dtype: DtypeObj | None, copy: bool) -> Manager:\n fdata = ma.getdata(data)\n if index is None:\n index = default_index(len(fdata))\n else:\n index = ensure_index(index)\n if columns is not None:\n columns = ensure_index(columns)\n arrays, arr_columns = to_arrays(fdata, columns)\n arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, len(index))\n if columns is None:\n columns = arr_columns\n mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype)\n if copy:\n mgr = mgr.copy()\n return mgr", + "docstring": "Extract from a masked rec array and create the manager.", + "type": "function", + "file_path": "pandas\\pandas\\core\\internals\\construction.py", + "ast_data": "FunctionDef name:rec_array_to_mgr arg:data arg:index arg:columns arg:dtype arg:copy arguments arg arg arg arg arg Assign Call If Compare Assign Call Call Assign Call If Compare Assign Call Assign Call Assign Call Call If Compare Assign Assign Call If Assign Call Return return:yes" + }, + { + "library": "scikit-learn", + "name": "_scale_normalize", + "source_code": "def _scale_normalize(X):\n X = make_nonnegative(X)\n row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()\n col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()\n row_diag = np.where(np.isnan(row_diag), 0, row_diag)\n col_diag = np.where(np.isnan(col_diag), 0, col_diag)\n if issparse(X):\n n_rows, n_cols = X.shape\n r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))\n c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))\n an = r @ X @ c\n else:\n an = row_diag[:, np.newaxis] * X * col_diag\n return (an, row_diag, col_diag)", + "docstring": "Normalize `` by scaling rows and columns independently. Returns the normalized matrix and the row and column scaling factors.", + "type": "function", + "file_path": "scikit-learn\\sklearn\\cluster\\_bicluster.py", + "ast_data": "FunctionDef name:_scale_normalize arg:X arguments arg Assign Call Assign Call Call Call Call Assign Call Call Call Call Assign Call Call Assign Call Call If Call Assign Assign Call Assign Call Assign Assign Return return:yes" + }, + { + "library": "tensorflow", + "name": "get_variable", + "source_code": "def get_variable(self, feature_column, name):\n del feature_column, name\n raise NotImplementedError('StateManager.get_var')", + "docstring": "Returns an existing variable. Args: feature_column: A object this variable corresponds to. 
name: variable name.", + "type": "method", + "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", + "ast_data": "FunctionDef name:get_variable arg:self arg:feature_column arg:name arguments arg arg arg Raise Call" + } +] \ No newline at end of file